import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
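# A minimal usage sketch (not part of the original file; the checkpoint name below is an
# assumption for illustration — any checkpoint with this processor layout works):
#
#   from transformers import AltCLIPProcessor
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # `inputs` now holds input_ids, attention_mask and pixel_values in one BatchEncoding.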
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
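# A minimal usage sketch (assumes an environment where the relative imports above
# resolve, i.e. a transformers checkout): build a config and inspect the ONNX axes.
#
#   config = Data2VecTextConfig(vocab_size=30522, hidden_size=768)
#   onnx_config = Data2VecTextOnnxConfig(config, task="default")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#   #              ('attention_mask', {0: 'batch', 1: 'sequence'})])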
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon search results for a product and collect them in a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": (
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36"
            " (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
        ),
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, "html.parser")
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # Discount expressed as a percentage of the MRP
                discount = float(
                    (
                        float(product_mrp.strip("₹").replace(",", ""))
                        - float(product_price.strip("₹").replace(",", ""))
                    )
                    / float(product_mrp.strip("₹").replace(",", ""))
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            # Skip entries that do not have the expected structure
            continue
        data_frame.loc[len(data_frame)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Use blanks where price/MRP could not be parsed, and switch to 1-based indexing
    data_frame.loc[data_frame["Current Price of the product"] == "", "Current Price of the product"] = " "
    data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
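# Worked example of the discount formula above (illustrative numbers, not scraped data):
# with MRP = ₹1,000 and a current price of ₹800,
#   discount = (1000 - 800) / 1000 * 100 = 20.0, i.e. 20 percent off the MRP.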
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of an init file."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in `_import_structure`
    and the objects defined under `TYPE_CHECKING`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between the `_import_structure` objects and the `TYPE_CHECKING` objects."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the transformers repo and raise on mismatches."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Make sure every submodule appears in the keys of `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)


def preprocess(image, w, h):
    """Convert PIL images (or lists of them) to a normalized torch tensor in [-1, 1]."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Nearly colinear vectors: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
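# Quick sanity check for slerp (illustrative, not part of the original pipeline):
# for orthogonal unit vectors the interpolation stays on the unit sphere.
#   v0, v1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
#   slerp(0.5, v0, v1)  # -> array([0.7071..., 0.7071...]), i.e. sin(pi/4) on each axis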
def spherical_dist_loss(x, y):
    """Squared geodesic distance between two unit-normalized embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_prompt: Optional[str] = None,
        style_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units of the flatten layer
        :param bp_num2: units of the hidden layer
        :param bp_num3: units of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # calculate the gradient from the data slice of the pool layer
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )

                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print(("   - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)

        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
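# Minimal instantiation sketch (hypothetical shapes, not from the original file):
# a 3x3 kernel, 2 kernels, stride 1; 2x2 average pooling.
#   cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=8, bp_num2=4, bp_num3=2)
#   conved, pooled = cnn.convolution(np.random.rand(6, 6))
#   # conved: two 4x4 feature maps; pooled: two 2x2 averaged maps.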
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import torch


def main() -> None:
    # Report how many CUDA devices this process can see.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()

# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert (the "experts/expert_{idx}/" naming below matches
    # the HF SwitchTransformers layout).
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict


GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    # Convert a gin config file to a SwitchTransformersConfig.
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : list[list[int]] = [[0 for _ in range(_lowerCamelCase )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_lowerCamelCase : Optional[int] = 1
for n in range(m + 1 ):
for k in range(1 , _lowerCamelCase ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
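# Worked example: partition(7) returns 15, the number of integer partitions of 7:
# 7, 6+1, 5+2, 5+1+1, 4+3, 4+2+1, 4+1+1+1, 3+3+1, 3+2+2, 3+2+1+1,
# 3+1+1+1+1, 2+2+2+1, 2+2+1+1+1, 2+1+1+1+1+1, 1+1+1+1+1+1+1.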
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of a number.
    If digit_amount > 0, round to that number of decimal places;
    otherwise return the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase_ = 10
lowercase_ = 256
def lowerCAmelCase (__A):
"""simple docstring"""
if len(__A) < MIN_NUM_TOKENS:
return None
_a = MinHash(num_perm=__A)
for token in set(__A):
min_hash.update(token.encode())
return min_hash
def lowerCAmelCase (__A):
"""simple docstring"""
return {t for t in NON_ALPHA.split(__A) if len(t.strip()) > 0}
class __A :
'''simple docstring'''
def __init__(self , *,
A = 0.85 , ) -> Optional[int]:
"""simple docstring"""
_a = duplication_jaccard_threshold
_a = NUM_PERM
_a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_a = defaultdict(A )
def a__ (self , A , A ) -> None:
"""simple docstring"""
_a = self._index.query(A )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(A , A )
if len(A ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A )
def a__ (self ) -> List[List[Dict]]:
"""simple docstring"""
_a = []
for base, duplicates in self._duplicate_clusters.items():
_a = [base] + list(A )
# reformat the cluster to be a list of dict
_a = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(A )
return duplicate_clusters
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_duplicate_clusters()
with open(A , '''w''' ) as f:
json.dump(A , A )
def lowerCAmelCase (__A):
"""simple docstring"""
_a , _a = element
_a = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCAmelCase (__A):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__A , max_queue_size=10_000) , chunksize=100 , ):
if data is not None:
yield data
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = DuplicationIndex(duplication_jaccard_threshold=__A)
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__A)) , max_queue_size=100)):
di.add(__A , __A)
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = get_tokens(__A)
_a = get_tokens(__A)
return len(tokensa & tokensa) / len(tokensa | tokensa)
lowercase_ = None
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = []
for elementa in cluster:
_a = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_a = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(__A , __A) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_a = 1
extremes.append(__A)
return extremes
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
global _shared_dataset
_a = dataset
_a = []
_a = partial(_find_cluster_extremes_shared , jaccard_threshold=__A)
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__A , __A , ) , total=len(__A) , ):
extremes_list.append(__A)
return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Remove near-duplicate files from a dataset, keeping one 'extreme' per cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
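# End-to-end usage sketch (illustrative; the dataset name is a placeholder for any corpus
# with "content", "repo_name" and "path" columns):
# if __name__ == "__main__":
#     from datasets import load_dataset
#     ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#     ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)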
| 11 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    '''Check whether the code point falls in a CJK ideograph block.'''
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
def is_chinese(word: str) -> int:
    '''Return 1 if every character of the word is a CJK ideograph, else 0.'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]) -> List[str]:
    '''Collect the multi-character Chinese words from a list of segmented tokens.'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set) -> List[str]:
    '''Prefix BERT sub-tokens that continue an LTP-segmented Chinese word with "##".'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
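# Illustration of the marking above (hypothetical inputs): if LTP segmented "中国" as one
# word while BERT produced per-character tokens, the continuation character is re-tagged:
#   add_sub_symbol(["中", "国", "人"], {"中国"}) -> ["中", "##国", "人"]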
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer) -> List[List[int]]:
    '''For every line, list the positions of BERT sub-tokens that continue a Chinese whole word.'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for token_id in input_ids:
            token = bert_tokenizer._convert_id_to_token(token_id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese sub-tokens starting with ##, i.e. continuations of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args) -> None:
    '''Read lines, compute the whole-word-masking reference ids, and save them as JSON lines.'''
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
    main(args)
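# Typical invocation (illustrative; all paths are placeholders):
#   python run_chinese_ref.py --file_name ./data.txt --ltp ./resources/ltp \
#       --bert bert-base-chinese --save_path ./resources/ref.txt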
| 48 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
'''simple docstring'''
def __init__(self ) -> Dict:
"""simple docstring"""
super().__init__()
_a = nn.Linear(3 , 4 )
        _a = nn.BatchNorm1d(4 )
_a = nn.Linear(4 , 5 )
def a__ (self , A ) -> Dict:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(A ) ) )
class PreForwardHook( ModelHook ):
    """A hook that adds 1 to the first positional argument before the forward pass."""
    def pre_forward(self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
    """A hook that adds 1 to the output of the forward pass."""
    def post_forward(self , module , output ):
        return output + 1
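# How accelerate dispatches these hooks (illustrative summary, not part of the test suite):
# `add_hook_to_module(module, hook)` swaps in a wrapped forward that runs
#     args, kwargs = hook.pre_forward(module, *args, **kwargs)
#     output = module._old_forward(*args, **kwargs)
#     return hook.post_forward(module, output)
# so PreForwardHook shifts the input by +1 and PostForwardHook shifts the output by +1.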
class HooksModelTester( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
self.assertEqual(test_model._hf_hook , A )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
add_hook_to_module(A , A , append=A )
self.assertEqual(isinstance(test_model._hf_hook , A ) , A )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(x + 1 )
_a = test_model(x + 2 )
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , A , atol=1E-5 )
def a__ (self ) -> str:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , output + 2 , atol=1E-5 )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a = True
_a = test_model(A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(A , AlignDevicesHook(io_same_device=A ) )
_a = torch.randn(2 , 3 ).to(0 )
_a = model(A )
self.assertEqual(output.device , torch.device(0 ) )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
_a = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(A , execution_device=A , offload=A )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(A , execution_device=A , offload=A , offload_buffers=A )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() , offload_buffers=A , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
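# Minimal CPU-offload sketch with the same API (illustrative; assumes a CUDA device 0):
# if __name__ == "__main__":
#     model = nn.Sequential(nn.Linear(3, 4), nn.Linear(4, 5))
#     attach_align_device_hook(model, execution_device=0, offload=True)
#     _ = model(torch.randn(2, 3))  # parameters live on "meta" and are streamed in per forward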
| 11 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class RemBertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs ):
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def get_vocab( self ) -> dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text , sample=False ):
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
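# Usage sketch (illustrative; downloads the sentencepiece model from the Hub):
# if __name__ == "__main__":
#     tok = RemBertTokenizer.from_pretrained("google/rembert")
#     print(tok.tokenize("The quick brown fox."))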
| 49 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self ):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass(self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components(self ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def test_save_load_float16(self ):
        super().test_save_load_float16(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass(self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local(self ):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
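# Shape note (illustrative): the 16x16 `image` next to the 32x32 `original_image` and
# `mask_image` mirrors the cascaded setting, where this stage upscales the low-resolution
# output of a previous IF stage inside the masked region.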
| 11 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Any = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class BlipTextConfig(PretrainedConfig ):
    model_type = "blip_text_model"
    def __init__( self ,vocab_size=30524 ,hidden_size=768 ,encoder_hidden_size=768 ,intermediate_size=3072 ,projection_dim=768 ,num_hidden_layers=12 ,num_attention_heads=8 ,max_position_embeddings=512 ,hidden_act="gelu" ,layer_norm_eps=1e-12 ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,bos_token_id=30522 ,eos_token_id=2 ,pad_token_id=0 ,sep_token_id=102 ,is_decoder=True ,use_cache=True ,**kwargs ,):
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,sep_token_id=sep_token_id ,**kwargs ,)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_hidden_size = encoder_hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.hidden_dropout_prob = hidden_dropout_prob
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.is_decoder = is_decoder
        self.use_cache = use_cache
    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("model_type" ) == "blip":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict ,**kwargs )
class BlipVisionConfig(PretrainedConfig ):
    model_type = "blip_vision_model"
    def __init__( self ,hidden_size=768 ,intermediate_size=3072 ,projection_dim=512 ,num_hidden_layers=12 ,num_attention_heads=12 ,image_size=384 ,patch_size=16 ,hidden_act="gelu" ,layer_norm_eps=1e-5 ,attention_dropout=0.0 ,initializer_range=1e-10 ,**kwargs ,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("model_type" ) == "blip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls ,"model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(config_dict ,**kwargs )
class BlipConfig(PretrainedConfig ):
    model_type = "blip"
    is_composition = True
    def __init__( self ,text_config=None ,vision_config=None ,projection_dim=512 ,logit_scale_init_value=2.6592 ,image_text_hidden_size=256 ,**kwargs ,):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
        self.text_config = BlipTextConfig(**text_config )
        self.vision_config = BlipVisionConfig(**vision_config )
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
    @classmethod
    def from_text_vision_configs( cls ,text_config: BlipTextConfig ,vision_config: BlipVisionConfig ,**kwargs ):
        return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
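# Composition sketch (illustrative): building a BlipConfig from explicit sub-configs.
# if __name__ == "__main__":
#     text_cfg = BlipTextConfig(vocab_size=30524)
#     vision_cfg = BlipVisionConfig(image_size=384)
#     cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     print(cfg.to_dict()["model_type"])  # "blip"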
| 50 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    """Builds configs and dummy inputs for the DecisionTransformerModel tests."""
    def __init__(self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self ):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self ):
        return DecisionTransformerConfig(
            batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3, as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False
    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self ):
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature(self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_autoregressive_prediction(self ):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of states, actions and returns. The test runs over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                state_pred , action_pred , return_pred = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state , reward , _ , _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
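# Note on the rollout above (illustrative): the return-to-go conditioning value is
# decremented by each observed reward, so after t steps the model is conditioned on
# R - sum(r_1..r_t), matching how returns-to-go are built in Decision Transformer training.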
| 11 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] , a__ : str , a__ : Union[str, Any]=13 , a__ : Any=32 , a__ : int=2 , a__ : str=3 , a__ : List[Any]=16 , a__ : Tuple=[1, 2, 1] , a__ : Union[str, Any]=[2, 2, 4] , a__ : Optional[int]=2 , a__ : str=2.0 , a__ : int=True , a__ : Any=0.0 , a__ : Optional[Any]=0.0 , a__ : int=0.1 , a__ : List[str]="gelu" , a__ : Optional[Any]=False , a__ : Tuple=True , a__ : int=0.02 , a__ : Tuple=1e-5 , a__ : str=True , a__ : Optional[Any]=None , a__ : List[str]=True , a__ : Tuple=10 , a__ : int=8 , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = depths
UpperCAmelCase = num_heads
UpperCAmelCase = window_size
UpperCAmelCase = mlp_ratio
UpperCAmelCase = qkv_bias
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = drop_path_rate
UpperCAmelCase = hidden_act
UpperCAmelCase = use_absolute_embeddings
UpperCAmelCase = patch_norm
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = is_training
UpperCAmelCase = scope
UpperCAmelCase = use_labels
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = encoder_stride
def __snake_case ( self : Any ):
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : str ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __snake_case ( self : str , a__ : str , a__ : List[str] , a__ : Dict ):
UpperCAmelCase = SwinvaModel(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __snake_case ( self : Union[str, Any] , a__ : Optional[int] , a__ : Union[str, Any] , a__ : int ):
UpperCAmelCase = SwinvaForMaskedImageModeling(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = SwinvaForMaskedImageModeling(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __snake_case ( self : Tuple , a__ : Dict , a__ : Tuple , a__ : int ):
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = SwinvaForImageClassification(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_lowerCamelCase =(
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =False
def __snake_case ( self : Dict ):
UpperCAmelCase = SwinvaModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=a__ , embed_dim=37 )
def __snake_case ( self : Union[str, Any] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : Tuple ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def __snake_case ( self : Dict ):
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def __snake_case ( self : int ):
pass
def __snake_case ( self : Optional[Any] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def __snake_case ( self : Any ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(a__ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = outputs.attentions
UpperCAmelCase = len(self.model_tester.depths )
self.assertEqual(len(a__ ) , a__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = config.window_size**2
UpperCAmelCase = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(a__ ) , a__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase = len(a__ )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(a__ , a__ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
UpperCAmelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase = 2
self.assertEqual(out_len + added_hidden_states , len(a__ ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(a__ ) , a__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def __snake_case ( self : Tuple , a__ : Optional[Any] , a__ : List[str] , a__ : Any , a__ : Tuple ):
UpperCAmelCase = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a__ ) , a__ )
# Swinv2 has a different seq_length
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(a__ ) , a__ )
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = reshaped_hidden_states[0].shape
UpperCAmelCase = (
reshaped_hidden_states[0].view(a__ , a__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def __snake_case ( self : Any ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
def __snake_case ( self : Union[str, Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a__ )
def __snake_case ( self : Any ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def __snake_case ( self : Optional[Any] ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = SwinvaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def __snake_case ( self : str ):
UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = _config_zero_init(a__ )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=a__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self : Union[str, Any] ):
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def __snake_case ( self : List[str] ):
UpperCAmelCase = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
a__ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**a__ )
# verify the logits
UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
UpperCAmelCase = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
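# Shape note (illustrative): the per-window attention maps asserted in the tests above have
# size [num_heads, window_size**2, window_size**2], since self-attention is restricted to
# the window_size x window_size patch grid inside each (shifted) window.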
| 51 |
'''simple docstring'''
from __future__ import annotations
def all_unique(collection):
    """
    Check that all elements of a collection are distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""", eval_metric)
    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def __A ( ) -> str:
__a : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''')
parser.add_argument(
'''--mixed_precision''' , type=a_ , default=a_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''')
__a : Optional[Any] = parser.parse_args()
__a : int = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(a_ , a_)
if __name__ == "__main__":
main() | 52 |
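# A minimal, self-contained sketch of the decorator pattern used in the script above,
# assuming only the documented behavior of `accelerate.utils.find_executable_batch_size`:
# it halves the batch size and retries whenever the wrapped function raises an
# out-of-memory error. The toy `pretend_train` body is illustrative, not from the original.
from accelerate.utils import find_executable_batch_size

def demo():
    @find_executable_batch_size(starting_batch_size=128)
    def pretend_train(batch_size):
        # Simulate OOM for batch sizes that are "too large" in this demo.
        if batch_size > 32:
            raise RuntimeError("CUDA out of memory.")  # message pattern the decorator detects
        print(f"training succeeded with batch_size={batch_size}")

    pretend_train()  # called with no arguments; the decorator supplies batch_size

demo()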
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Recursively search a sorted list; return True if item is present."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"""{target} was {not_str}found in {sequence}""")
| 11 | 0 |
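# A minimal iterative variant of the recursive search above; it avoids the list
# slicing (which copies O(n) elements per call) by tracking index bounds instead.
def binary_search_iterative(a_list, item):
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False

assert binary_search_iterative([1, 3, 5, 7], 5) is True
assert binary_search_iterative([1, 3, 5, 7], 4) is False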
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any=1_3 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Union[str, Any]=2_2_4 , lowerCAmelCase_ : List[str]=1_0_0_0 , lowerCAmelCase_ : Union[str, Any]=[3, 3, 6, 4] , lowerCAmelCase_ : List[str]=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Any:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = num_labels
__lowerCAmelCase = image_size
__lowerCAmelCase = layer_depths
__lowerCAmelCase = embed_dims
def lowercase ( self : Tuple ) -> Optional[int]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase ( self : Optional[Any] ) -> Optional[int]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase_ , layer_scale_init_value=1e-5 , )
def lowercase ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple ) -> int:
__lowerCAmelCase = SwiftFormerModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> Optional[int]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = SwiftFormerForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__lowerCAmelCase = SwiftFormerForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : str ) -> Any:
((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) = self.prepare_config_and_inputs()
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
a_ = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : Optional[int] ) -> List[Any]:
__lowerCAmelCase = SwiftFormerModelTester(self )
__lowerCAmelCase = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def lowercase ( self : Optional[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='SwiftFormer does not use inputs_embeds' )
def lowercase ( self : List[str] ) -> Tuple:
pass
def lowercase ( self : int ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def lowercase ( self : str ) -> Optional[Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase ( self : List[Any] ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowercase ( self : Union[str, Any] ) -> int:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = SwiftFormerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip(reason='SwiftFormer does not output attentions' )
def lowercase ( self : List[Any] ) -> Union[str, Any]:
pass
def lowercase ( self : Any ) -> int:
def check_hidden_states_output(lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = 8
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase_ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> List[str]:
def _config_zero_init(lowerCAmelCase_ : List[str] ):
__lowerCAmelCase = copy.deepcopy(lowerCAmelCase_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , 1e-10 )
if isinstance(getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ ):
__lowerCAmelCase = _config_zero_init(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return configs_no_init
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = _config_zero_init(lowerCAmelCase_ )
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(config=lowerCAmelCase_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : Any ) -> List[str]:
pass
def a_ ( ):
__lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : Dict ) -> Tuple:
return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None
@slow
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(lowerCAmelCase_ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 53 |
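# A short sketch of the shape arithmetic the hidden-state test above checks:
# SwiftFormer downsamples the 224x224 input by 4 in the patch embedding, then
# halves the spatial size after every second recorded hidden state. The numbers
# mirror the tester defaults above and are for illustration only.
image_size = 224
embed_dims = [48, 56, 112, 220]
for i in range(8):  # eight recorded hidden states, two per stage
    side = (image_size // 4) // 2 ** (i // 2)
    print(f"hidden_state[{i}]: (batch, {embed_dims[i // 2]}, {side}, {side})")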
'''simple docstring'''
class PrefixSum:
    """Precomputes running sums so range-sum queries take O(1) time."""

    def __init__(self, array) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end) -> int:
        """Sum of array[start..end] (inclusive bounds)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
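# A small usage sketch for the prefix-sum class above, as reconstructed here.
ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
assert ps.get_sum(0, 3) == 10  # whole array
assert ps.get_sum(1, 2) == 5   # 2 + 3
assert ps.contains_sum(7)      # subarray [3, 4]
assert not ps.contains_sum(100)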
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solves the multi-process interleaved print problem with an exclusive file lock."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
    dist.barrier()
    if rank == 0:
        printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
    printflock(f"""{gpu} is broken""")
    raise
| 54 |
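# A minimal single-machine sketch of the same handshake, assuming a CPU-only box:
# it swaps the `nccl` backend for `gloo` so the collective can be smoke-tested
# without GPUs (run with: torchrun --nproc_per_node 2 this_file.py).
import torch
import torch.distributed as dist

dist.init_process_group("gloo")
t = torch.ones(1)
dist.all_reduce(t, op=dist.ReduceOp.SUM)
dist.barrier()
assert t.item() == dist.get_world_size()
print(f"rank {dist.get_rank()}/{dist.get_world_size()} OK, all_reduce sum = {t.item()}")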
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n (trial division up to sqrt(n))."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
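# A quick sanity sketch for the trial-division factorizer above: the product of
# the returned factors must reconstruct the input.
from math import prod

factors = prime_factors(360)
assert factors == [2, 2, 2, 3, 3, 5]
assert prod(factors) == 360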
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Dict = logging.get_logger(__name__)
# TODO Update this
SCREAMING_SNAKE_CASE :Dict = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = "esm"
def __init__( self : List[str] ,A : Dict=None ,A : Tuple=None ,A : Any=None ,A : Optional[Any]=7_68 ,A : Tuple=12 ,A : List[str]=12 ,A : Tuple=30_72 ,A : List[str]=0.1 ,A : List[Any]=0.1 ,A : int=10_26 ,A : List[str]=0.02 ,A : Union[str, Any]=1E-12 ,A : List[Any]="absolute" ,A : List[Any]=True ,A : Union[str, Any]=None ,A : Optional[int]=False ,A : Dict=False ,A : Tuple=None ,A : Optional[int]=None ,**A : List[Any] ,):
super().__init__(pad_token_id=A ,mask_token_id=A ,**A )
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = initializer_range
__A = layer_norm_eps
__A = position_embedding_type
__A = use_cache
__A = emb_layer_norm_before
__A = token_dropout
__A = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
__A = EsmFoldConfig()
elif isinstance(A ,A ):
__A = EsmFoldConfig(**A )
__A = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
__A = get_default_vocab_list()
else:
__A = vocab_list
else:
__A = None
__A = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,A ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def UpperCamelCase_ ( self : Optional[int] ):
__A = super().to_dict()
if isinstance(self.esmfold_config ,A ):
__A = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCAmelCase :
'''simple docstring'''
snake_case_ = None
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = 0
snake_case_ = True
snake_case_ = False
snake_case_ = 128
snake_case_ = None
def UpperCamelCase_ ( self : List[Any] ):
if self.trunk is None:
__A = TrunkConfig()
elif isinstance(self.trunk ,A ):
__A = TrunkConfig(**self.trunk )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = asdict(self )
__A = self.trunk.to_dict()
return output
@dataclass
class UpperCAmelCase :
'''simple docstring'''
snake_case_ = 48
snake_case_ = 1024
snake_case_ = 128
snake_case_ = 32
snake_case_ = 32
snake_case_ = 32
snake_case_ = 0
snake_case_ = 0
snake_case_ = False
snake_case_ = 4
snake_case_ = 128
snake_case_ = None
def UpperCamelCase_ ( self : List[Any] ):
if self.structure_module is None:
__A = StructureModuleConfig()
elif isinstance(self.structure_module ,A ):
__A = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
__A = self.sequence_state_dim // self.sequence_head_width
__A = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def UpperCamelCase_ ( self : Tuple ):
__A = asdict(self )
__A = self.structure_module.to_dict()
return output
@dataclass
class UpperCAmelCase :
'''simple docstring'''
snake_case_ = 384
snake_case_ = 128
snake_case_ = 16
snake_case_ = 128
snake_case_ = 12
snake_case_ = 4
snake_case_ = 8
snake_case_ = 0.1
snake_case_ = 8
snake_case_ = 1
snake_case_ = 2
snake_case_ = 7
snake_case_ = 10
snake_case_ = 1E-8
snake_case_ = 1E5
def UpperCamelCase_ ( self : Union[str, Any] ):
return asdict(self )
def UpperCAmelCase ( ) -> int:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 55 |
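# A short numeric sketch of the head-width checks enforced in TrunkConfig above,
# using its defaults: each state dimension must be an exact multiple of its head
# width, and the number of heads falls out of the division.
sequence_state_dim, sequence_head_width = 1024, 32
pairwise_state_dim, pairwise_head_width = 128, 32
sequence_num_heads = sequence_state_dim // sequence_head_width   # 32 heads
pairwise_num_heads = pairwise_state_dim // pairwise_head_width   # 4 heads
assert sequence_num_heads * sequence_head_width == sequence_state_dim
assert pairwise_num_heads * pairwise_head_width == pairwise_state_dim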
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''')
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('''Invalid public key''')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('''Invalid public key''')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
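# A two-party exchange sketch using the class above: each side publishes a public
# key, and both derive the same SHA-256 shared secret from the other's key.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)

alice_public = alice.generate_public_key()
bob_public = bob.generate_public_key()

assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)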
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
_a : Optional[int] = False
_a : Dict = False
def _a (lowercase__ : Namespace ) -> int:
"""simple docstring"""
return TrainCommand(lowercase__ )
class _lowercase ( __lowercase ):
@staticmethod
def a ( SCREAMING_SNAKE_CASE_ : ArgumentParser ) -> Any:
__snake_case = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=SCREAMING_SNAKE_CASE_ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=SCREAMING_SNAKE_CASE_ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=SCREAMING_SNAKE_CASE_ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=SCREAMING_SNAKE_CASE_ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=SCREAMING_SNAKE_CASE_ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=SCREAMING_SNAKE_CASE_ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=SCREAMING_SNAKE_CASE_ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=SCREAMING_SNAKE_CASE_ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=SCREAMING_SNAKE_CASE_ , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE_ , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=SCREAMING_SNAKE_CASE_ , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Namespace ) -> Optional[int]:
__snake_case = logging.get_logger('transformers-cli/training' )
__snake_case = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE_ )
__snake_case = args.output
__snake_case = args.column_label
__snake_case = args.column_text
__snake_case = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
__snake_case = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
__snake_case = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
__snake_case = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case = args.validation_split
__snake_case = args.train_batch_size
__snake_case = args.valid_batch_size
__snake_case = args.learning_rate
__snake_case = args.adam_epsilon
def a ( self : Optional[int] ) -> int:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def a ( self : Tuple ) -> List[str]:
raise NotImplementedError
def a ( self : Dict ) -> Optional[int]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 56 |
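# A minimal sketch of how a command like the one above plugs into the CLI,
# assuming the class under its upstream name `TrainCommand`, whose
# `register_subcommand` and `run` methods correspond to the obfuscated `a`
# methods. The parser wiring below is illustrative, not the library's entry point.
from argparse import ArgumentParser

def build_cli():
    parser = ArgumentParser("transformers-cli")
    subparsers = parser.add_subparsers(help="commands")
    TrainCommand.register_subcommand(subparsers)  # registers the 'train' subcommand
    return parser

# args = build_cli().parse_args(["train", "--train_data", "train.csv"])
# args.func(args).run()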
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def lowerCAmelCase (__A , __A):
"""simple docstring"""
if os.path.exists(__A):
if os.path.exists(os.path.join(__A , '''config.json''')) and os.path.isfile(
os.path.join(__A , '''config.json''')):
os.remove(os.path.join(__A , '''config.json'''))
if os.path.exists(os.path.join(__A , '''pytorch_model.bin''')) and os.path.isfile(
os.path.join(__A , '''pytorch_model.bin''')):
os.remove(os.path.join(__A , '''pytorch_model.bin'''))
else:
os.makedirs(__A)
model.save_pretrained(__A)
def lowerCAmelCase (__A , __A=False):
"""simple docstring"""
_a = 2
if unlogit:
_a = torch.pow(__A , __A)
_a = p * torch.log(__A)
_a = 0
return -plogp.sum(dim=-1)
def lowerCAmelCase (__A):
"""simple docstring"""
logger.info('''lv, h >\t''' + '''\t'''.join(F'''{x + 1}''' for x in range(len(__A))))
for row in range(len(__A)):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:.5f}''' for x in tensor[row].cpu().data))
else:
logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:d}''' for x in tensor[row].cpu().data))
def lowerCAmelCase (__A , __A , __A , __A=True , __A=True , __A=None , __A=False):
"""simple docstring"""
_a , _a = model.config.num_hidden_layers, model.config.num_attention_heads
_a = torch.zeros(__A , __A).to(args.device)
_a = torch.zeros(__A , __A).to(args.device)
if head_mask is None:
_a = torch.ones(__A , __A).to(args.device)
head_mask.requires_grad_(requires_grad=__A)
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_a = None
_a = 0.0
_a = 0.0
for step, inputs in enumerate(tqdm(__A , desc='''Iteration''' , disable=args.local_rank not in [-1, 0])):
_a = tuple(t.to(args.device) for t in inputs)
((_a) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_a = model(__A , labels=__A , head_mask=__A)
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_a , _a , _a = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__A):
_a = entropy(attn.detach() , __A)
attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__A).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_a = 2
_a = torch.pow(torch.pow(__A , __A).sum(-1) , 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
if not args.dont_normalize_global_importance:
_a = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''')
print_ad_tensor(__A)
if compute_importance:
logger.info('''Head importance scores''')
print_ad_tensor(__A)
logger.info('''Head ranked by importance scores''')
_a = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device)
_a = torch.arange(
head_importance.numel() , device=args.device)
_a = head_ranks.view_as(__A)
print_ad_tensor(__A)
return attn_entropy, head_importance, total_loss
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
_a , _a , _a = compute_heads_importance(__A , __A , __A , compute_entropy=__A)
_a = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , __A , original_score * args.masking_threshold)
_a = torch.ones_like(__A)
_a = max(1 , int(new_head_mask.numel() * args.masking_amount))
_a = original_score
while current_score >= original_score * args.masking_threshold:
_a = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_a = float('''Inf''')
_a = head_importance.view(-1).sort()[1]
if len(__A) <= num_to_mask:
print('''BREAK BY num_to_mask''')
break
# mask heads
_a = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist()))
_a = new_head_mask.view(-1)
_a = 0.0
_a = new_head_mask.view_as(__A)
_a = new_head_mask.clone().detach()
print_ad_tensor(__A)
# Compute metric and head importance again
_a , _a , _a = compute_heads_importance(
__A , __A , __A , compute_entropy=__A , head_mask=__A)
_a = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , __A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''')
print_ad_tensor(__A)
np.save(os.path.join(args.output_dir , '''head_mask.npy''') , head_mask.detach().cpu().numpy())
return head_mask
def lowerCAmelCase (__A , __A , __A , __A):
"""simple docstring"""
_a = datetime.now()
_a , _a , _a = compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A)
_a = 1 / loss
_a = datetime.now() - before_time
_a = sum(p.numel() for p in model.parameters())
_a = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__A))
}
for k, v in heads_to_prune.items():
if isinstance(__A , __A):
_a = [
v,
]
assert sum(len(__A) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
model.prune_heads(__A)
_a = sum(p.numel() for p in model.parameters())
_a = datetime.now()
_a , _a , _a = compute_heads_importance(
__A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A , actually_pruned=__A , )
_a = 1 / loss
_a = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , __A , __A , pruned_num_params / original_num_params * 100 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , __A , __A)
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100)
save_model(__A , args.output_dir)
def lowerCAmelCase ():
"""simple docstring"""
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__A , type=__A , required=__A , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__A , type=__A , required=__A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=__A , type=__A , required=__A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=__A , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=__A , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=__A , type=__A , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=__A , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''')
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''')
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''')
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''')
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=__A , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=__A , help='''Amount to heads to masking at each masking step.''')
parser.add_argument('''--metric_name''' , default='''acc''' , type=__A , help='''Metric to use for head masking.''')
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__A , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=__A , help='''Batch size.''')
parser.add_argument('''--seed''' , type=__A , default=42)
parser.add_argument('''--local_rank''' , type=__A , default=-1 , help='''local_rank for distributed training on gpus''')
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''')
parser.add_argument('''--server_ip''' , type=__A , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=__A , default='''''' , help='''Can be used for distant debugging.''')
_a = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__A)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_a = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''')
_a = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
_a = torch.device('''cuda''' , args.local_rank)
_a = 1
torch.distributed.init_process_group(backend='''nccl''') # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1)))
_a = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device)
if args.local_rank != -1:
_a = nn.parallel.DistributedDataParallel(
__A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__A)
elif args.n_gpu > 1:
_a = nn.DataParallel(__A)
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__A)
torch.save(__A , os.path.join(args.output_dir , '''run_args.bin'''))
logger.info('''Training/evaluation parameters %s''' , __A)
# Prepare dataset
_a = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa),
])
_a = (torch.from_numpy(__A),)
_a = TensorDataset(*__A)
_a = RandomSampler(__A)
_a = DataLoader(__A , sampler=__A , batch_size=args.batch_size)
# Compute head entropy and importance score
compute_heads_importance(__A , __A , __A)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_a = mask_heads(__A , __A , __A)
prune_heads(__A , __A , __A , __A)
if __name__ == "__main__":
main()
| 11 | 0 |
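# A minimal sketch of the entropy computation the script above applies to
# attention maps: for each row of attention probabilities p, accumulate
# -sum(p * log p), defining 0 * log(0) as 0. Shapes are illustrative.
import torch

def attention_entropy(p):
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # avoid NaNs from log(0)
    return -plogp.sum(dim=-1)

attn = torch.softmax(torch.randn(2, 4, 8, 8), dim=-1)  # (batch, heads, query, key)
print(attention_entropy(attn).shape)  # per-row entropies: (2, 4, 8)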
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'CLIPConfigMixin',
    'DecisionTransformerConfigMixin',
    'EncoderDecoderConfigMixin',
    'RagConfigMixin',
    'SpeechEncoderDecoderConfigMixin',
    'VisionEncoderDecoderConfigMixin',
    'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
check_config_docstrings_have_checkpoints() | 57 |
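# A standalone sketch of the checkpoint-link check above on a fabricated
# docstring: the regex captures markdown links of the form [name](hf.co/name),
# and the name must round-trip into the link.
import re

_re_ckpt = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Instantiating a configuration with [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
for name, link in _re_ckpt.findall(doc):
    assert link == f"https://huggingface.co/{name}"
    print(name, "->", link)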
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of num must be multiplied to reach one digit."""
    if not isinstance(num, int):
        raise ValueError('''multiplicative_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Count how many times the digits of num must be summed to reach one digit."""
    if not isinstance(num, int):
        raise ValueError('''additive_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
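# Worked examples for the two persistence functions above: 39 -> 27 -> 14 -> 4
# takes three multiplicative steps, and 199 -> 19 -> 10 -> 1 three additive ones.
assert multiplicative_persistence(39) == 3
assert additive_persistence(199) == 3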
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : List[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowerCAmelCase : Tuple = 25_6047
__lowerCAmelCase : int = 25_6145
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = NllbTokenizer
_lowerCamelCase = NllbTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = True
_lowerCamelCase = {}
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : str = NllbTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = NllbTokenizer(_lowercase , keep_accents=_lowercase )
snake_case_ : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
snake_case_ : Optional[int] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
snake_case_ : Any = tempfile.mkdtemp()
snake_case_ : Optional[int] = tokenizer_r.save_pretrained(_lowercase )
snake_case_ : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case_ : Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
snake_case_ : Optional[int] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : str = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : Optional[Any] = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
snake_case_ : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
snake_case_ : Union[str, Any] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : str = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
snake_case_ : str = tempfile.mkdtemp()
snake_case_ : Any = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
snake_case_ : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : List[str] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : Optional[int] = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
snake_case_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
snake_case_ : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
snake_case_ : Optional[int] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
snake_case_ : Any = tokenizer.prepare_seq2seq_batch(
src_texts=_lowercase , tgt_texts=_lowercase , max_length=3 , max_target_length=1_0 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 1_0 )
# max_target_length will default to max_length if not specified
snake_case_ : str = tokenizer.prepare_seq2seq_batch(
_lowercase , tgt_texts=_lowercase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
snake_case_ : Optional[int] = tokenizer.prepare_seq2seq_batch(
src_texts=_lowercase , max_length=3 , max_target_length=1_0 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , _lowercase )
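# Illustrative usage sketch (hypothetical tokenizer `tok`; not executed by the
# suite): `prepare_seq2seq_batch` truncates source and target independently, and
# `max_target_length` falls back to `max_length` when omitted:
#   batch = tok.prepare_seq2seq_batch(
#       src_texts=["UN Chief says ..."], tgt_texts=["Seful ONU declara ..."],
#       max_length=3, max_target_length=1_0, return_tensors="pt",
#       src_lang="eng_Latn", tgt_lang="ron_Latn",
#   )
#   # batch.input_ids.shape[1] == 3 and batch.labels.shape[1] == 1_0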
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : List[str] = [AddedToken("""<special>""" , lstrip=_lowercase )]
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , **_lowercase )
snake_case_ : Dict = tokenizer_r.encode("""Hey this is a <special> token""" )
snake_case_ : int = tokenizer_r.encode("""<special>""" , add_special_tokens=_lowercase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , **_lowercase )
snake_case_ : Tuple = tokenizer_p.encode("""Hey this is a <special> token""" )
snake_case_ : int = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
checkpoint_name = '''facebook/nllb-200-distilled-600M'''
src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
expected_src_tokens = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
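# Note: the leading 256_047 above is the `eng_Latn` language code the tokenizer
# prepends as its prefix token, and the trailing 2 is EOS (see the prefix/suffix
# token assertions further below).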
@classmethod
def UpperCAmelCase__ ( cls ) -> List[str]:
'''simple docstring'''
cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" )
cls.pad_token_id = 1
return cls
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 2_5_6_0_5_7 )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : List[Any] = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
# fmt: on
snake_case_ : int = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
snake_case_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , _lowercase )
snake_case_ : Optional[int] = 1_0
snake_case_ : Optional[int] = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _lowercase )
self.assertEqual(len(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_6_2_0_3, 3] )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = tempfile.mkdtemp()
snake_case_ : List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowercase )
snake_case_ : Union[str, Any] = NllbTokenizer.from_pretrained(_lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase )
@require_torch
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
batch["""decoder_input_ids"""] = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 1_5) , batch.input_ids.shape )
self.assertEqual((2, 1_5) , batch.attention_mask.shape )
snake_case_ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
self.assertEqual(RO_CODE , batch.decoder_input_ids[0, 0] ) # decoder starts with the ron_Latn code
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
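# For intuition, what `shift_tokens_right` produces here (a sketch under the
# signature used above, not the canonical implementation):
#   decoder_input_ids[:, 0]  = lang_code_to_id["ron_Latn"]   # decoder start token
#   decoder_input_ids[:, 1:] = labels[:, :-1]                # labels shifted right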
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors="""pt""" )
snake_case_ : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=1_0 , return_tensors="""pt""" )
snake_case_ : str = targets["""input_ids"""]
batch["""decoder_input_ids"""] = shift_tokens_right(
_lowercase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(_lowercase ) , {
# eng_Latn (prefix language code), A, test, EOS
"""input_ids""": [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# fra_Latn
"""forced_bos_token_id""": 2_5_6_0_5_7,
} , )
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = True
snake_case_ : Optional[int] = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] )
snake_case_ : str = False
snake_case_ : List[Any] = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
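# Summary of the toggle exercised above (illustrative):
#   legacy_behaviour=True  -> ids = tokens + [2 (EOS), 2_5_6_0_4_7 (eng_Latn)]
#   legacy_behaviour=False -> ids = [2_5_6_0_4_7 (eng_Latn)] + tokens + [2 (EOS)]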
| 58 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
    """simple docstring"""
    size = size if size is not None else {'''height''': 18, '''width''': 18}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_normalize = do_normalize
    self.image_mean = image_mean
    self.image_std = image_std
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __A ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : str = DPTImageProcessor if is_vision_available() else None
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = DPTImageProcessingTester(self )
@property
def a__ (self ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ (self ) -> Dict:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 11 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict: SplitDict ) -> None:
    """simple docstring"""
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def test_split_dict_asdict_has_dataset_name( split_info: SplitInfo ) -> None:
    """simple docstring"""
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 59 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __A :
'''simple docstring'''
def __init__(self , A , A=16 , A=13 , A=7 , A=14 , A=10 , A=19 , A=5 , A=4 , A=True , A=16 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=[1, 2, 3, 4, 5] , A=25 , A=5 , ) -> List[str]:
"""simple docstring"""
_a = d_model
_a = parent
_a = batch_size
_a = prediction_length
_a = context_length
_a = cardinality
_a = num_time_features
_a = lags_sequence
_a = embedding_dimension
_a = is_training
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = context_length
_a = prediction_length + label_length
_a = label_length
_a = moving_average
_a = autocorrelation_factor
def a__ (self ) -> Any:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def a__ (self , A ) -> List[Any]:
"""simple docstring"""
_a = config.context_length + max(config.lags_sequence )
_a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, _past_length] )
_a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, config.prediction_length] )
_a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
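# Shape intuition for the inputs built above (illustrative): `past_values` spans
# context_length + max(lags_sequence) time steps so the model can slice lagged
# copies of the series for every position in the context window.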
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.get_config()
_a = self.prepare_autoformer_inputs_dict(A )
return config, inputs_dict
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ (self , A , A ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModel(config=A ).to(A ).eval()
_a = model(**A )
_a = outputs.encoder_last_hidden_state
_a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_encoder()
encoder.save_pretrained(A )
_a = AutoformerEncoder.from_pretrained(A ).to(A )
_a , _a , _a , _a , _a = model.create_network_inputs(**A )
_a , _a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_a = encoder(inputs_embeds=A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_decoder()
decoder.save_pretrained(A )
_a = AutoformerDecoder.from_pretrained(A ).to(A )
_a = decoder(
trend=A , inputs_embeds=A , encoder_hidden_states=A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
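# A minimal sketch of the series decomposition exercised above (assumption:
# moving-average trend extraction as in the Autoformer paper, not the exact layer):
#   trend    = x.unfold(1, moving_average, 1).mean(-1)   # after edge padding in time
#   seasonal = x - trend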
@require_torch
class __A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__lowerCamelCase : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
__lowerCamelCase : Tuple = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = False
__lowerCamelCase : int = False
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : List[Any] = False
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModelTester(self )
_a = ConfigTester(self , config_class=A , has_text_modality=A )
def a__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_a = model_class(A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A )
_a , _a = model_class.from_pretrained(A , output_loading_info=A )
self.assertEqual(info['''missing_keys'''] , [] )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
model_signature = inspect.signature(getattr(AutoformerModel , '''forward''' ) )
# The main input is the name of the argument after `self`
observed_main_input_name = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(A )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(A )] , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , '''seq_length''' , A )
_a = getattr(self.model_tester , '''decoder_seq_length''' , A )
_a = getattr(self.model_tester , '''encoder_seq_length''' , A )
_a = getattr(self.model_tester , '''d_model''' , A )
_a = getattr(self.model_tester , '''num_attention_heads''' , A )
_a = d_model // num_attention_heads
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_a = len(A )
_a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(A , A )
# decoder attentions
_a = outputs.decoder_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_a = outputs.cross_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 2 , len(A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """simple docstring"""
    file = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=filename , repo_type='''dataset''')
    batch = torch.load(file , map_location=torch_device)
    return batch
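# Example (network access required; keys assumed from the tests below):
#   batch = prepare_batch("val-batch.pt")
#   past_values = batch["past_values"]              # (batch, past_length) tensor
#   past_observed_mask = batch["past_observed_mask"]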
@require_torch
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch()
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
_a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
_a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
_a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , A )
_a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=A )
_a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A , rtol=1E-1 ) )
| 11 | 0 |
from __future__ import annotations
def encode( plain: str ) -> list[int]:
    """simple docstring"""
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded: list[int] ) -> str:
    """simple docstring"""
    return "".join(chr(elem + 96 ) for elem in encoded )
def main( ) -> None:
    """simple docstring"""
    encoded = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , encoded )
    print('''Decoded:''' , decode(encoded ) )
if __name__ == "__main__":
main()
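# Worked example of the A1Z26 cipher above:
#   encode("hello") -> [8, 5, 12, 12, 15]   # ord('h') - 96 == 8
#   decode([8, 5, 12, 12, 15]) -> "hello"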
| 60 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __A :
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ) -> Optional[int]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def a__ (self , A , A , A , A , A , A , A ) -> Any:
"""simple docstring"""
_a = OpenLlamaModel(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Any:
"""simple docstring"""
_a = True
_a = OpenLlamaModel(A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , )
_a = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Dict:
"""simple docstring"""
_a = True
_a = True
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
_a = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowerCamelCase : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaModelTester(self )
_a = ConfigTester(self , config_class=A , hidden_size=37 )
def a__ (self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''single_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''multi_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a__ (self , A ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ids_tensor([1, 10] , config.vocab_size )
_a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
_a = original_model(A ).last_hidden_state
_a = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = {'''type''': scaling_type, '''factor''': 10.0}
_a = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
_a = scaled_model(A ).last_hidden_state
_a = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
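# Sketch of the two RoPE scaling strategies exercised above (assumed standard
# semantics, not the model's exact code):
#   linear : rotary angles are computed from position / factor at every length,
#            so even short inputs diverge from the unscaled model
#   dynamic: the rotary base is only enlarged once the input exceeds
#            max_position_embeddings, so short inputs match the unscaled model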
| 11 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'
# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend( line ):
    """simple docstring"""
    backends = _re_backend.findall(line )
    if len(backends ) == 0:
        return None
    return "_and_".join(backends )
def read_init( ):
    """simple docstring"""
    with open(os.path.join(PATH_TO_DIFFUSERS , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines ):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index] )
        if backend is not None:
            while not lines[line_index].startswith("else:" ):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines ) and len(lines[line_index] ) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 8 ):
                    objects.append(line[8:-2] )
                line_index += 1
            if len(objects ) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object( name , backend_name ):
    """simple docstring"""
    if name.isupper():
        return DUMMY_CONSTANT.format(name )
    elif name.islower():
        return DUMMY_FUNCTION.format(name , backend_name )
    else:
        return DUMMY_CLASS.format(name , backend_name )
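# e.g. create_dummy_object("AudioLDMPipeline", '["torch"]') renders DUMMY_CLASS as:
#   class AudioLDMPipeline(metaclass=DummyObject):
#       _backends = ["torch"]
#       ... (__init__ / from_config / from_pretrained all call requires_backends)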
def create_dummy_files( backend_specific_objects=None ):
    """simple docstring"""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(F'"{b}"' for b in backend.split("_and_" ) ) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o , backend_name ) for o in objects] )
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies( overwrite=False ):
    """simple docstring"""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , "utils" )
    dummy_file_paths = {
        backend: os.path.join(path , F'dummy_{short_names.get(backend , backend )}_objects.py' )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , "r" , encoding="utf-8" , newline="\n" ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main '
                    "__init__ has new objects." )
                with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    F'diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` '
                    "to fix this." )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase = parser.parse_args()
check_dummies(args.fix_and_overwrite)
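# Typical invocations (from the repository root):
#   python utils/check_dummies.py                      # error out if dummy files are stale
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy modules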
| 61 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=4 , ) -> List[str]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_attention_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_choices
def a__ (self ) -> str:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_attention_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __A ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = FlaxAlbertModelTester(self )
@slow
def a__ (self ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_a = model_class_name.from_pretrained('''albert-base-v2''' )
_a = model(np.ones((1, 1) ) )
self.assertIsNotNone(A )
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ) -> Dict:
"""simple docstring"""
_a = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_a = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_a = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_a = model(A , attention_mask=A )[0]
_a = (1, 11, 768)
self.assertEqual(output.shape , A )
_a = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
| 11 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = AudioLDMPipeline
UpperCamelCase_ : Dict = TEXT_TO_AUDIO_PARAMS
UpperCamelCase_ : Dict = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCamelCase_ : Union[str, Any] = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def get_dummy_components( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[Any] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
SCREAMING_SNAKE_CASE : Dict = ClapTextModelWithProjection(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
SCREAMING_SNAKE_CASE : Union[str, Any] = SpeechT5HifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : int = SpeechT5HifiGan(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
    if str(device ).startswith("mps" ):
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=device ).manual_seed(seed )
    inputs = {
        "prompt": "A hammer hitting a wooden surface",
        "generator": generator,
        "num_inference_steps": 2,
        "guidance_scale": 6.0,
    }
    return inputs
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Tuple = AudioLDMPipeline(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = audioldm_pipe.to(UpperCAmelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = audioldm_pipe(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = output.audios[0]
assert audio.ndim == 1
assert len(UpperCAmelCase_ ) == 256
SCREAMING_SNAKE_CASE : str = audio[:10]
SCREAMING_SNAKE_CASE : List[Any] = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[Any] = AudioLDMPipeline(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = audioldm_pipe.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = audioldm_pipe.to(UpperCAmelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = 3 * [inputs["prompt"]]
# forward
SCREAMING_SNAKE_CASE : List[Any] = audioldm_pipe(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = output.audios[0]
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = 3 * [inputs.pop("prompt" )]
SCREAMING_SNAKE_CASE : List[str] = audioldm_pipe.tokenizer(
UpperCAmelCase_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : Union[str, Any] = text_inputs["input_ids"].to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = audioldm_pipe.text_encoder(
UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : str = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE : List[Any] = F.normalize(UpperCAmelCase_ , dim=-1 )
SCREAMING_SNAKE_CASE : Optional[Any] = prompt_embeds
# forward
SCREAMING_SNAKE_CASE : Any = audioldm_pipe(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = AudioLDMPipeline(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = audioldm_pipe.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = audioldm_pipe.to(UpperCAmelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = 3 * ["this is a negative prompt"]
SCREAMING_SNAKE_CASE : Optional[Any] = negative_prompt
SCREAMING_SNAKE_CASE : str = 3 * [inputs["prompt"]]
# forward
SCREAMING_SNAKE_CASE : Dict = audioldm_pipe(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = output.audios[0]
SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = 3 * [inputs.pop("prompt" )]
SCREAMING_SNAKE_CASE : Any = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE : Any = audioldm_pipe.tokenizer(
UpperCAmelCase_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors="pt" , )
SCREAMING_SNAKE_CASE : Dict = text_inputs["input_ids"].to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = audioldm_pipe.text_encoder(
UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE : Optional[int] = F.normalize(UpperCAmelCase_ , dim=-1 )
embeds.append(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = embeds
# forward
SCREAMING_SNAKE_CASE : Dict = audioldm_pipe(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = AudioLDMPipeline(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = audioldm_pipe.to(UpperCAmelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = "egg cracking"
SCREAMING_SNAKE_CASE : int = audioldm_pipe(**UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(UpperCAmelCase_ ) == 256
SCREAMING_SNAKE_CASE : Union[str, Any] = audio[:10]
SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : int = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = AudioLDMPipeline(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = audioldm_pipe.to(UpperCAmelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE : str = audioldm_pipe(UpperCAmelCase_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE : Any = 2
SCREAMING_SNAKE_CASE : Optional[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE : Dict = 2
SCREAMING_SNAKE_CASE : Any = audioldm_pipe(UpperCAmelCase_ , num_inference_steps=2 , num_waveforms_per_prompt=UpperCAmelCase_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE : List[str] = 2
SCREAMING_SNAKE_CASE : Tuple = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=UpperCAmelCase_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = AudioLDMPipeline(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = audioldm_pipe.to(UpperCAmelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = audioldm_pipe(audio_length_in_s=0.016 , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = output.audios[0]
assert audio.ndim == 1
        assert len(audio ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE : Optional[int] = audioldm_pipe(audio_length_in_s=0.032 , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output.audios[0]
assert audio.ndim == 1
        assert len(audio ) / vocoder_sampling_rate == 0.032
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = AudioLDMPipeline(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = audioldm_pipe.to(UpperCAmelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = ["hey"]
SCREAMING_SNAKE_CASE : Optional[Any] = audioldm_pipe(UpperCAmelCase_ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE : Optional[int] = output.audios.shape
assert audio_shape == (1, 256)
SCREAMING_SNAKE_CASE : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaHifiGan(UpperCAmelCase_ ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = audioldm_pipe(UpperCAmelCase_ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE : List[Any] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _A ( self : Union[str, Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ )
def _A ( self : Any ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=UpperCAmelCase_ )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _A ( self : int ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ )
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _A ( self : Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int="cpu" , UpperCAmelCase_ : Optional[int]=torch.floataa , UpperCAmelCase_ : Optional[Any]=0 ):
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(UpperCAmelCase_ ).standard_normal((1, 8, 128, 16) )
SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def _A ( self : str ):
SCREAMING_SNAKE_CASE : List[Any] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
SCREAMING_SNAKE_CASE : Optional[int] = audioldm_pipe.to(UpperCAmelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.get_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = 25
SCREAMING_SNAKE_CASE : Dict = audioldm_pipe(**UpperCAmelCase_ ).audios[0]
assert audio.ndim == 1
        assert len(audio ) == 81920
SCREAMING_SNAKE_CASE : str = audio[7_7230:7_7240]
SCREAMING_SNAKE_CASE : Dict = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : str = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
SCREAMING_SNAKE_CASE : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE : Optional[int] = audioldm_pipe.to(UpperCAmelCase_ )
audioldm_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.get_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = audioldm_pipe(**UpperCAmelCase_ ).audios[0]
assert audio.ndim == 1
        assert len(audio ) == 81920
SCREAMING_SNAKE_CASE : List[Any] = audio[2_7780:2_7790]
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 62 |
'''simple docstring'''
def lowerCAmelCase (__A):
"""simple docstring"""
return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6'''))
def lowerCAmelCase (__A):
"""simple docstring"""
_a = credit_card_number
_a = 0
_a = len(__A) - 2
for i in range(__A , -1 , -2):
# double the value of every second digit
_a = int(cc_number[i])
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
        _a = cc_number[:i] + str(digit) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(__A) - 1 , -1 , -2):
total += int(cc_number[i])
return total % 10 == 0
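# Worked example (checked by hand, not part of the original file): for "59" the
# second-to-last digit doubles to 5 * 2 = 10, which the branch above reduces to
# 10 % 10 + 1 = 1; the total is then 1 + 9 = 10 and 10 % 10 == 0, so "59" passes.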
def lowerCAmelCase (__A):
"""simple docstring"""
_a = F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''')
return False
if not 13 <= len(__A) <= 16:
print(F'''{error_message} of its length.''')
return False
if not validate_initial_digits(__A):
print(F'''{error_message} of its first two digits.''')
return False
if not luhn_validation(__A):
print(F'''{error_message} it fails the Luhn check.''')
return False
print(F'''{credit_card_number} is a valid credit card number.''')
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 11 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
a : Any = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[Any] , *__lowercase : Tuple , **__lowercase : Dict ) -> None:
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , __lowercase , )
super().__init__(*__lowercase , **__lowercase )
| 63 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
def A__ ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : str=False ):
if isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ ):
SCREAMING_SNAKE_CASE__: int= len(set_a.intersection(snake_case_ ) )
if alternative_union:
SCREAMING_SNAKE_CASE__: int= len(snake_case_ ) + len(snake_case_ )
else:
SCREAMING_SNAKE_CASE__: str= len(set_a.union(snake_case_ ) )
return intersection / union
if isinstance(snake_case_ , (list, tuple) ) and isinstance(snake_case_ , (list, tuple) ):
SCREAMING_SNAKE_CASE__: Any= [element for element in set_a if element in set_b]
if alternative_union:
SCREAMING_SNAKE_CASE__: List[str]= len(snake_case_ ) + len(snake_case_ )
return len(snake_case_ ) / union
else:
SCREAMING_SNAKE_CASE__: Optional[int]= set_a + [element for element in set_b if element not in set_a]
return len(snake_case_ ) / len(snake_case_ )
return len(snake_case_ ) / len(snake_case_ )
return None
if __name__ == "__main__":
lowercase_ : Any = {'a', 'b', 'c', 'd', 'e'}
lowercase_ : Optional[Any] = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
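    # A second check using the alternative union |A| + |B| (values worked out by hand;
    # the third positional argument is the alternative_union flag):
    # |A ∩ B| = 3 and |A| + |B| = 5 + 6 = 11, so this prints 3 / 11 ≈ 0.2727
    print(jaccard_similarity(set_a, set_b, True))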
| 64 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase (__A = "laptop"):
"""simple docstring"""
_a = F'''https://www.amazon.in/laptop/s?k={product}'''
_a = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_a = BeautifulSoup(requests.get(__A , headers=__A).text)
# Initialize a Pandas dataframe with the column titles
_a = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
])
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''}) , ):
try:
_a = item.ha.text
_a = '''https://www.amazon.in/''' + item.ha.a['''href''']
_a = item.find('''span''' , attrs={'''class''': '''a-offscreen'''}).text
try:
_a = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''}).text
except AttributeError:
_a = '''Not available'''
try:
_a = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
)
except AttributeError:
_a = ''''''
try:
_a = float(
(
(
float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
- float(product_price.strip('''₹''').replace(''',''' , ''''''))
)
/ float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
)
* 100)
except ValueError:
_a = float('''nan''')
except AttributeError:
pass
_a = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_a = ''' '''
_a = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowercase_ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 100 , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = x_start
UpperCAmelCase__ : List[Any] = fnc(__UpperCamelCase )
UpperCAmelCase__ : Any = 0.0
for _ in range(__UpperCamelCase ):
        # Approximates each small segment of the curve as linear and solves
        # for the trapezoidal area
        UpperCAmelCase__ : str = (x_end - x_start) / steps + x1  # x2
        UpperCAmelCase__ : Optional[int] = fnc(x2 )  # fx2
        area += abs(fx2 + fx1 ) * (x2 - x1) / 2
        # Increment step
        UpperCAmelCase__ : int = x2  # x1
        UpperCAmelCase__ : Dict = fx2  # fx1
return area
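# Worked example (computed by hand, not part of the original file): fnc(x) = x ** 2
# on [0, 1] with steps=2 uses the points x = 0, 0.5, 1 and returns
# (0 + 0.25) / 2 * 0.5 + (0.25 + 1) / 2 * 0.5 = 0.375.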
if __name__ == "__main__":
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
__UpperCAmelCase = 10
while i <= 10_0000:
print(F"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 10
| 65 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
if isinstance(__A , torch.Tensor):
return image
elif isinstance(__A , PIL.Image.Image):
_a = [image]
if isinstance(image[0] , PIL.Image.Image):
_a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
_a = np.concatenate(__A , axis=0)
_a = np.array(__A).astype(np.floataa) / 2_55.0
_a = image.transpose(0 , 3 , 1 , 2)
_a = 2.0 * image - 1.0
_a = torch.from_numpy(__A)
elif isinstance(image[0] , torch.Tensor):
_a = torch.cat(__A , dim=0)
return image
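# Note on the scaling above (read off the code, not an added behaviour): pixel values
# land in [0, 1] after the division by 255.0, and 2.0 * image - 1.0 then maps them to
# the [-1, 1] range the diffusion VAE consumes.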
def lowerCAmelCase (__A , __A , __A , __A=0.99_95):
"""simple docstring"""
    _a = False  # inputs_are_torch
    if not isinstance(v0 , np.ndarray):
        _a = True  # inputs_are_torch
        _a = v0.device  # input_device
        _a = v0.cpu().numpy()  # v0
        _a = v1.cpu().numpy()  # v1
    _a = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))  # dot
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: plain linear interpolation is numerically safer
        _a = (1 - t) * v0 + t * v1  # v2
    else:
        _a = np.arccos(dot)  # theta_0
        _a = np.sin(theta_0)  # sin_theta_0
        _a = theta_0 * t  # theta_t
        _a = np.sin(theta_t)  # sin_theta_t
        _a = np.sin(theta_0 - theta_t) / sin_theta_0  # s0
        _a = sin_theta_t / sin_theta_0  # s1
        _a = s0 * v0 + s1 * v1  # v2
    if inputs_are_torch:
        _a = torch.from_numpy(v2).to(input_device)  # v2 back on the original device
    return v2
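# Sanity check (worked by hand, not part of the original file): slerp of the unit
# vectors [1, 0] and [0, 1] at t = 0.5 gives theta_0 = pi / 2 and s0 = s1 = sin(pi / 4),
# so the result is approximately [0.7071, 0.7071].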
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = F.normalize(__A , dim=-1)
_a = F.normalize(__A , dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
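# For unit-normalised inputs this evaluates to 2 * (theta / 2) ** 2, i.e. half the
# squared angle between x and y: identical embeddings give 0, antipodal ones pi ** 2 / 2.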
def lowerCAmelCase (__A , __A):
"""simple docstring"""
for param in model.parameters():
_a = value
class __A ( A ):
'''simple docstring'''
def __init__(self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , )
_a = (
feature_extractor.size
if isinstance(feature_extractor.size , A )
else feature_extractor.size['''shortest_edge''']
)
_a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , A )
set_requires_grad(self.clip_model , A )
def a__ (self , A = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
self.enable_attention_slicing(A )
def a__ (self ) -> int:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self ) -> str:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self , A , A , A ) -> Optional[Any]:
"""simple docstring"""
_a = min(int(num_inference_steps * strength ) , A )
_a = max(num_inference_steps - init_timestep , 0 )
_a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def a__ (self , A , A , A , A , A , A=None ) -> List[str]:
"""simple docstring"""
if not isinstance(A , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(A )}''' )
_a = image.to(device=A , dtype=A )
if isinstance(A , A ):
_a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
_a = torch.cat(A , dim=0 )
else:
_a = self.vae.encode(A ).latent_dist.sample(A )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 0.18215 * init_latents
_a = init_latents.repeat_interleave(A , dim=0 )
_a = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A )
# get latents
_a = self.scheduler.add_noise(A , A , A )
_a = init_latents
return latents
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def a__ (self , A , A ) -> List[Any]:
"""simple docstring"""
_a = self.feature_extractor.preprocess(A )
_a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = image_embeddings_clip.repeat_interleave(A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def a__ (self , A , A , A , A , A , A , A , ) -> Union[str, Any]:
"""simple docstring"""
_a = latents.detach().requires_grad_()
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_a = self.scheduler.alphas_cumprod[timestep]
_a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_a = torch.sqrt(A )
_a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , A ):
_a = self.scheduler.sigmas[index]
_a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18215 * sample
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = transforms.Resize(self.feature_extractor_size )(A )
_a = self.normalize(A ).to(latents.dtype )
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = spherical_dist_loss(A , A ).mean() * clip_guidance_scale
_a = -torch.autograd.grad(A , A )[0]
if isinstance(self.scheduler , A ):
_a = latents.detach() + grads * (sigma**2)
_a = noise_pred_original
else:
_a = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self , A , A , A = None , A = None , A = 512 , A = 512 , A = 0.6 , A = 50 , A = 7.5 , A = 1 , A = 0.0 , A = 100 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> str:
"""simple docstring"""
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
# get prompt text embeddings for content and style
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(A , A , self.device )
_a = timesteps[:1].repeat(A )
# Preprocess image
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = slerp(A , A , A )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(A , A )
_a = self.get_clip_image_embeddings(A , A )
_a = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_a = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18215 * latents
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 11 | 0 |
import os
import pytest
from attr import dataclass
UpperCamelCase = "us-east-1" # defaults region
@dataclass
class lowerCAmelCase_ :
_UpperCamelCase : str
_UpperCamelCase : int = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
_UpperCamelCase : Dict = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
_UpperCamelCase : int = {**hyperparameters, "max_steps": 1000}
@property
def __a ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __a ( self ):
return F"""{self.framework}-transfromers-test"""
@property
def __a ( self ):
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def __a ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 66 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = CTRLTokenizer
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Any = False
def a__ (self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_a = dict(zip(A , range(len(A ) ) ) )
_a = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
def a__ (self , **A ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **A )
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = '''adapt react readapt apt'''
_a = '''adapt react readapt apt'''
return input_text, output_text
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''adapt react readapt apt'''
_a = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_a = tokenizer.tokenize(A )
self.assertListEqual(A , A )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
| 11 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
snake_case = logging.get_logger(__name__)
snake_case = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
snake_case = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
snake_case = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = '''whisper'''
SCREAMING_SNAKE_CASE_ : str = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : int ,__A : Dict=5_1865 ,__A : List[str]=80 ,__A : str=6 ,__A : List[str]=4 ,__A : int=6 ,__A : Optional[Any]=4 ,__A : int=1536 ,__A : List[str]=1536 ,__A : Optional[Any]=0.0 ,__A : Union[str, Any]=0.0 ,__A : int=5_0257 ,__A : Dict=True ,__A : int=True ,__A : int="gelu" ,__A : Union[str, Any]=256 ,__A : Optional[int]=0.0 ,__A : Any=0.0 ,__A : Optional[int]=0.0 ,__A : List[Any]=0.02 ,__A : Optional[int]=False ,__A : int=1500 ,__A : Union[str, Any]=448 ,__A : Optional[Any]=5_0256 ,__A : List[str]=5_0256 ,__A : Dict=5_0256 ,__A : int=None ,__A : Optional[Any]=[220, 5_0256] ,__A : Optional[Any]=False ,__A : Dict=256 ,__A : Tuple=False ,__A : Union[str, Any]=0.05 ,__A : Optional[Any]=10 ,__A : int=2 ,__A : Optional[int]=0.0 ,__A : Optional[int]=10 ,__A : Any=0 ,__A : Optional[int]=7 ,**__A : str ,) -> List[Any]:
_lowercase = vocab_size
_lowercase = num_mel_bins
_lowercase = d_model
_lowercase = encoder_layers
_lowercase = encoder_attention_heads
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = encoder_ffn_dim
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = activation_function
_lowercase = init_std
_lowercase = encoder_layerdrop
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = encoder_layers
_lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowercase = max_source_positions
_lowercase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
_lowercase = classifier_proj_size
_lowercase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowercase = apply_spec_augment
_lowercase = mask_time_prob
_lowercase = mask_time_length
_lowercase = mask_time_min_masks
_lowercase = mask_feature_prob
_lowercase = mask_feature_length
_lowercase = mask_feature_min_masks
_lowercase = median_filter_width
super().__init__(
pad_token_id=__A ,bos_token_id=__A ,eos_token_id=__A ,is_encoder_decoder=__A ,decoder_start_token_id=__A ,suppress_tokens=__A ,begin_suppress_tokens=__A ,**__A ,)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@property
def __UpperCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
_lowercase = OrderedDict(
[
('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
] )
if self.use_past:
_lowercase = {0: 'batch'}
else:
_lowercase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__A ,direction='inputs' )
return common_inputs
def __UpperCAmelCase ( self : Tuple ,__A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional["TensorType"] = None ,__A : int = 2_2050 ,__A : float = 5.0 ,__A : int = 220 ,) -> Mapping[str, Any]:
_lowercase = OrderedDict()
_lowercase = OnnxConfig.generate_dummy_inputs(
self ,preprocessor=preprocessor.feature_extractor ,batch_size=__A ,framework=__A ,sampling_rate=__A ,time_duration=__A ,frequency=__A ,)
_lowercase = encoder_inputs['input_features'].shape[2]
_lowercase = encoder_sequence_length // 2 if self.use_past else seq_length
_lowercase = super().generate_dummy_inputs(
preprocessor.tokenizer ,__A ,__A ,__A ,__A )
_lowercase = encoder_inputs.pop('input_features' )
_lowercase = decoder_inputs.pop('decoder_input_ids' )
if "past_key_values" in decoder_inputs:
_lowercase = decoder_inputs.pop('past_key_values' )
return dummy_inputs
@property
def __UpperCAmelCase ( self : Union[str, Any] ) -> float:
        return 1e-3
| 67 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase_ = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def lowerCAmelCase (__A):
"""simple docstring"""
_a = list(s_dict.keys())
for key in keys:
_a = r'''.*/layers_(\d+)'''
_a = key
if re.match(__A , __A):
_a = re.sub(r'''layers_(\d+)''' , r'''block/\1/layer''' , __A)
_a = r'''(encoder|decoder)\/'''
if re.match(__A , __A):
_a = re.match(__A , __A).groups()
if groups[0] == "encoder":
_a = re.sub(r'''/mlp/''' , r'''/1/mlp/''' , __A)
_a = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/1/layer_norm/''' , __A)
elif groups[0] == "decoder":
_a = re.sub(r'''/mlp/''' , r'''/2/mlp/''' , __A)
_a = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/2/layer_norm/''' , __A)
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_a = new_key.replace(__A , __A)
print(F'''{key} -> {new_key}''')
_a = s_dict.pop(__A)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys()):
if "expert" in key:
_a = s_dict[key].shape[0]
_a = s_dict[key]
for idx in range(__A):
                _a = expert_weights[idx]
            print(F'''{key} -> {key.replace('expert/' , F"experts/expert_{idx}/")}''')
s_dict.pop(__A)
return s_dict
lowercase_ = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def lowerCAmelCase (__A , __A):
"""simple docstring"""
import regex as re
with open(__A , '''r''') as f:
_a = f.read()
_a = re.findall(r'''(.*) = ([0-9.]*)''' , __A)
_a = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_a = float(__A) if '''.''' in value else int(__A)
_a = re.findall(r'''(.*activations) = \(\'(.*)\',\)''' , __A)[0]
_a = str(activation[1])
_a = num_experts
_a = SwitchTransformersConfig(**__A)
return config
def lowerCAmelCase (__A , __A , __A=None , __A="./" , __A=8):
"""simple docstring"""
print(F'''Loading flax weights from : {flax_checkpoint_path}''')
_a = checkpoints.load_tax_checkpoint(__A)
if gin_file is not None:
_a = convert_gin_to_config(__A , __A)
else:
_a = SwitchTransformersConfig.from_pretrained(__A)
_a = SwitchTransformersForConditionalGeneration(__A)
_a = flax_params['''target''']
_a = flatten_dict(__A , sep='''/''')
_a = rename_keys(__A)
_a = unflatten_dict(__A , sep='''/''')
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__A , __A)
print(F'''Save PyTorch model to {pytorch_dump_path}''')
pt_model.save_pretrained(__A)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 11 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__A = re.compile(r"\s+")
def lowercase__ ( A_: int ) -> Any:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(A_ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def lowercase__ ( A_: int ) -> Optional[int]:
"""simple docstring"""
    __UpperCAmelCase =[len(line ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(A_ ), "line_max": max(A_ )}
def lowercase__ ( A_: Any ) -> int:
"""simple docstring"""
__UpperCAmelCase =np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def lowercase__ ( A_: List[Any] , A_: Tuple ) -> str:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def lowercase__ ( A_: List[str] , A_: Dict=5 ) -> Optional[Any]:
"""simple docstring"""
__UpperCAmelCase =["""auto-generated""", """autogenerated""", """automatically generated"""]
__UpperCAmelCase =example["""content"""].splitlines()
for _, line in zip(range(A_ ) , A_ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def lowercase__ ( A_: str , A_: List[Any]=5 , A_: List[Any]=0.0_5 ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase =["""unit tests""", """test file""", """configuration file"""]
__UpperCAmelCase =example["""content"""].splitlines()
__UpperCAmelCase =0
__UpperCAmelCase =0
# first test
for _, line in zip(range(A_ ) , A_ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
__UpperCAmelCase =example["""content"""].count("""\n""" )
__UpperCAmelCase =int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def lowercase__ ( A_: Any ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =["""def """, """class """, """for """, """while """]
__UpperCAmelCase =example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def lowercase__ ( A_: Optional[int] , A_: List[Any]=4 ) -> Any:
"""simple docstring"""
__UpperCAmelCase =example["""content"""].splitlines()
__UpperCAmelCase =0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def lowercase__ ( A_: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase =tokenizer(example["""content"""] , truncation=A_ )["""input_ids"""]
__UpperCAmelCase =len(example["""content"""] ) / len(A_ )
return {"ratio": ratio}
def lowercase__ ( A_: int ) -> str:
"""simple docstring"""
__UpperCAmelCase ={}
results.update(get_hash(A_ ) )
results.update(line_stats(A_ ) )
results.update(alpha_stats(A_ ) )
results.update(char_token_ratio(A_ ) )
results.update(is_autogenerated(A_ ) )
results.update(is_config_or_test(A_ ) )
results.update(has_no_keywords(A_ ) )
results.update(has_few_assignments(A_ ) )
return results
def lowercase__ ( A_: Dict , A_: Any , A_: List[str] ) -> str:
"""simple docstring"""
if not check_uniques(A_ , A_ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def lowercase__ ( A_: List[Any] ) -> Tuple:
"""simple docstring"""
with open(A_ , """rb""" ) as f_in:
with gzip.open(str(A_ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(A_ , A_ )
os.unlink(A_ )
# Settings
__A = HfArgumentParser(PreprocessingArguments)
__A = parser.parse_args()
if args.num_workers is None:
__A = multiprocessing.cpu_count()
__A = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__A = time.time()
__A = load_dataset(args.dataset_name, split="train")
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
__A = time.time()
__A = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
__A = set(ds.unique("hash"))
__A = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
__A = time.time()
__A = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__A = time.time()
__A , __A = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
__A = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
__A = output_dir / "data"
data_dir.mkdir(exist_ok=True)
__A = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__A = str(data_dir / F"""file-{file_number+1:012}.json""")
__A = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 68 |
'''simple docstring'''
def lowerCAmelCase (__A , __A):
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__A) , __A)
return number - int(__A)
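# For example, decimal_isolate(35.345, 1) returns round(35.345 - 35, 1) = 0.3, while a
# non-positive digit_amount skips the rounding and keeps the full fractional part.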
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 11 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = tempfile.mkdtemp()
__snake_case = BlipImageProcessor()
__snake_case = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
__snake_case = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
__snake_case = InstructBlipProcessor(a_ , a_ , a_ )
processor.save_pretrained(self.tmpdirname )
def A ( self : Optional[Any] , **a_ : List[Any] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).tokenizer
def A ( self : Any , **a_ : int ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).image_processor
def A ( self : Any , **a_ : List[str] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).qformer_tokenizer
def A ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__snake_case = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__snake_case = self.get_image_processor(do_normalize=a_ , padding_value=1.0 )
__snake_case = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
self.assertIsInstance(processor.qformer_tokenizer , a_ )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=a_ , image_processor=a_ , qformer_tokenizer=a_ )
__snake_case = self.prepare_image_inputs()
__snake_case = image_processor(a_ , return_tensors="np" )
__snake_case = processor(images=a_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A ( self : Any ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=a_ , image_processor=a_ , qformer_tokenizer=a_ )
__snake_case = "lower newer"
__snake_case = processor(text=a_ )
__snake_case = tokenizer(a_ , return_token_type_ids=a_ )
__snake_case = qformer_tokenizer(a_ , return_token_type_ids=a_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=a_ , image_processor=a_ , qformer_tokenizer=a_ )
__snake_case = "lower newer"
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=a_ , images=a_ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=a_ , image_processor=a_ , qformer_tokenizer=a_ )
__snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case = processor.batch_decode(a_ )
__snake_case = tokenizer.batch_decode(a_ )
self.assertListEqual(a_ , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = self.get_qformer_tokenizer()
__snake_case = InstructBlipProcessor(
tokenizer=a_ , image_processor=a_ , qformer_tokenizer=a_ )
__snake_case = "lower newer"
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=a_ , images=a_ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 69 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase_ = 10
lowercase_ = 256
def lowerCAmelCase (__A):
"""simple docstring"""
if len(__A) < MIN_NUM_TOKENS:
return None
_a = MinHash(num_perm=__A)
for token in set(__A):
min_hash.update(token.encode())
return min_hash
def lowerCAmelCase (__A):
"""simple docstring"""
return {t for t in NON_ALPHA.split(__A) if len(t.strip()) > 0}
class __A :
'''simple docstring'''
def __init__(self , *,
A = 0.85 , ) -> Optional[int]:
"""simple docstring"""
_a = duplication_jaccard_threshold
_a = NUM_PERM
_a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_a = defaultdict(A )
def a__ (self , A , A ) -> None:
"""simple docstring"""
_a = self._index.query(A )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(A , A )
if len(A ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A )
def a__ (self ) -> List[List[Dict]]:
"""simple docstring"""
_a = []
for base, duplicates in self._duplicate_clusters.items():
_a = [base] + list(A )
# reformat the cluster to be a list of dict
_a = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(A )
return duplicate_clusters
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_duplicate_clusters()
with open(A , '''w''' ) as f:
json.dump(A , A )
def _compute_min_hash(element):
    """Compute the MinHash of one dataset element, keyed by (index, repo_name, path)."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield (key, MinHash) pairs computed in parallel over the dataset iterator."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Feed the MinHash of every file to a DuplicationIndex and collect its clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of the token sets of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
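
# Worked example of the similarity above (an illustrative addition, not part of the
# original script): "a b c" and "a b d" share the tokens {a, b} out of the four
# distinct tokens {a, b, c, d}, so jaccard_similarity("a b c", "a b d") == 2 / 4 == 0.5.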
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its "extremes": every file in the cluster is within
    jaccard_threshold of at least one extreme, and each extreme counts its copies."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every cluster in parallel, sharing the dataset through a
    module-level global so it is not pickled for each worker."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85):
    """Deduplicate the dataset: cluster near-duplicates, keep one "extreme" per group of
    mutual near-duplicates, and filter the remaining duplicates out."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
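

# Minimal end-to-end sketch (an illustrative addition: the toy rows and threshold below
# are assumptions; real inputs need "content", "repo_name" and "path" columns, and
# documents shorter than MIN_NUM_TOKENS are skipped by get_min_hash).
if __name__ == "__main__":
    _toy = Dataset.from_dict(
        {
            "content": ["def add(a, b):\n    return a + b"] * 2 + ["print('hello')"],
            "repo_name": ["repo0", "repo1", "repo2"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    _filtered, _clusters = deduplicate_dataset(_toy, jaccard_threshold=0.85)
    print(len(_filtered), _clusters)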
| 11 | 0 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 70 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
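

# Tiny demonstration (an illustrative addition; it relies only on the public
# `add_hook_to_module` API exercised by the tests below): a PreForwardHook attached to
# nn.Identity shifts the input by one before the forward pass.
if __name__ == "__main__":
    _m = nn.Identity()
    add_hook_to_module(_m, PreForwardHook())
    print(_m(torch.zeros(1)))  # tensor([1.])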
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
self.assertEqual(test_model._hf_hook , A )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
add_hook_to_module(A , A , append=A )
self.assertEqual(isinstance(test_model._hf_hook , A ) , A )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(x + 1 )
_a = test_model(x + 2 )
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , A , atol=1E-5 )
def a__ (self ) -> str:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , output + 2 , atol=1E-5 )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a = True
_a = test_model(A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(A , AlignDevicesHook(io_same_device=A ) )
_a = torch.randn(2 , 3 ).to(0 )
_a = model(A )
self.assertEqual(output.device , torch.device(0 ) )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
_a = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(A , execution_device=A , offload=A )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(A , execution_device=A , offload=A , offload_buffers=A )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() , offload_buffers=A , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 11 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
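

# A minimal concrete reader (an illustrative addition; the JSON handling is an
# assumption for demonstration, not part of the `datasets` API): subclasses only need
# to store their arguments through the base __init__ and implement read().
class ToyDictReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        import json

        with open(self.path_or_paths, encoding="utf-8") as f:
            return Dataset.from_dict(json.load(f))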
| 71 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = IFInpaintingSuperResolutionPipeline
__lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {'latents'}
def a__ (self ) -> List[Any]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def a__ (self , A , A=0 ) -> List[Any]:
"""simple docstring"""
if str(A ).startswith('''mps''' ):
_a = torch.manual_seed(A )
else:
_a = torch.Generator(device=A ).manual_seed(A )
_a = floats_tensor((1, 3, 16, 16) , rng=random.Random(A ) ).to(A )
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
_a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def a__ (self ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def a__ (self ) -> str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a__ (self ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def a__ (self ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 11 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [13_84]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 72 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """Autoregressively predict states, actions and returns over two timesteps."""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 11 | 0 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ : List[str] = '▁'
a_ : str = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _snake_case ( A__ , unittest.TestCase ):
_lowercase : str = BigBirdTokenizer
_lowercase : Any = BigBirdTokenizerFast
_lowercase : Tuple = True
_lowercase : int = True
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
super().setUp()
SCREAMING_SNAKE_CASE = self.tokenizer_class(a , keep_accents=a)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = '<s>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a) , a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a) , a)
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<unk>')
self.assertEqual(vocab_keys[1] , '<s>')
self.assertEqual(vocab_keys[-1] , '[MASK]')
self.assertEqual(len(a) , 1004)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(a)
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
SCREAMING_SNAKE_CASE = tokenizer.encode(a , add_special_tokens=a)
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(a)
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = BigBirdTokenizer(a , keep_accents=a)
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test')
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a) , [285, 46, 10, 170, 382] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(a)
self.assertListEqual(
a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(a)
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = 'Hello World!'
SCREAMING_SNAKE_CASE = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(a , self.big_tokenizer.encode(a))
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
SCREAMING_SNAKE_CASE = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(a , self.big_tokenizer.encode(a))
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys())[:10]
SCREAMING_SNAKE_CASE = ' '.join(a)
SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(a , return_tensors='pt' , return_token_type_ids=a)
SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=a)
SCREAMING_SNAKE_CASE = BigBirdConfig(attention_type='original_full')
SCREAMING_SNAKE_CASE = BigBirdModel(a)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a)
model(**a)
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
SCREAMING_SNAKE_CASE = tokenizer.decode(tokenizer('Paris is the [MASK].').input_ids)
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]')
@slow
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
# fmt: off
SCREAMING_SNAKE_CASE = {'input_ids': [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
| 73 |
'''simple docstring'''
from __future__ import annotations
def all_unique(collection) -> bool:
    """Return True if every element of the collection appears exactly once."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
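
# Quick sanity checks (illustrative additions; the function name above is
# reconstructed): a list of distinct items passes, a string with a repeated
# character does not.
assert all_unique([1, 2, 3]) is True
assert all_unique("aab") is False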
| 11 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowercase_ = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
lowercase_ = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
lowercase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowercase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowercase_ = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
lowercase_ = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowercase_ = (
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
lowercase_ = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowercase_ = (
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
lowercase_ = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowercase_ = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
lowercase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowercase_ = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
lowercase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
lowercase_ = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
lowercase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
lowercase_ = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
lowercase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
lowercase_ = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
lowercase_ = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowercase_ = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
lowercase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
lowercase_ = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
lowercase_ = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowercase_ = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
lowercase_ = """"""
lowercase_ = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
lowercase_ = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowercase_ = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def a__ ( snake_case , snake_case ):
"""simple docstring"""
assert ReadMe.from_string(snake_case , snake_case ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def a__ ( snake_case , snake_case ):
"""simple docstring"""
with pytest.raises(snake_case , match=re.escape(expected_error.format(path='''root''' ) ) ):
__SCREAMING_SNAKE_CASE : int = ReadMe.from_string(snake_case , snake_case )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def a__ ( snake_case , snake_case ):
"""simple docstring"""
with pytest.raises(snake_case , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(snake_case , snake_case )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def a__ ( snake_case ):
"""simple docstring"""
ReadMe.from_string(snake_case , snake_case , suppress_parsing_errors=snake_case )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def a__ ( snake_case , snake_case ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE : Any = Path(snake_case ) / '''README.md'''
with open(snake_case , '''w+''' ) as readme_file:
readme_file.write(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = ReadMe.from_readme(snake_case , snake_case ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def a__ ( snake_case , snake_case ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE : Optional[int] = Path(snake_case ) / '''README.md'''
with open(snake_case , '''w+''' ) as readme_file:
readme_file.write(snake_case )
__SCREAMING_SNAKE_CASE : str = expected_error.format(path=snake_case )
with pytest.raises(snake_case , match=re.escape(snake_case ) ):
__SCREAMING_SNAKE_CASE : Dict = ReadMe.from_readme(snake_case , snake_case )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def a__ ( snake_case , snake_case ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE : int = Path(snake_case ) / '''README.md'''
with open(snake_case , '''w+''' ) as readme_file:
readme_file.write(snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = expected_error.format(path=snake_case )
with pytest.raises(snake_case , match=re.escape(snake_case ) ):
ReadMe.from_readme(snake_case , snake_case )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def a__ ( snake_case ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE : List[str] = Path(snake_case ) / '''README.md'''
with open(snake_case , '''w+''' ) as readme_file:
readme_file.write(snake_case )
ReadMe.from_readme(snake_case , snake_case , suppress_parsing_errors=snake_case )
| 74 |
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Recursively search a sorted list for `item` by bisection."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by comma:\n").strip()
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
lowercase_ = int(input("Enter the number to be found in the list:\n").strip())
lowercase_ = "" if binary_search(sequence, target) else "not "
print(F"""{target} was {not_str}found in {sequence}""")
| 11 | 0 |
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of an arc spanning `angle` degrees on a circle of the given radius."""
    return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
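
# Worked example (an illustrative addition): a 90-degree arc is a quarter of the full
# circumference 2 * pi * r, so arc_length(90, 10) == 0.25 * 2 * pi * 10 == 5 * pi ~= 15.71.
assert abs(arc_length(9_0, 1_0) - 5 * pi) < 1e-9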
| 75 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array) -> None:
        """Precompute the running prefix sums of `array`."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end) -> int:
        """Return the sum of array[start .. end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum) -> bool:
        """Return True if some contiguous subarray sums to `target_sum`."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
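
# Usage sketch (illustrative additions): O(1) range sums and subarray-sum queries.
_ps = PrefixSum([1, 2, 3, 4])
assert _ps.get_sum(1, 3) == 9  # 2 + 3 + 4
assert _ps.contains_sum(7) is True  # the subarray [3, 4]
assert _ps.contains_sum(11) is False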
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['ConvNextFeatureExtractor']
a_ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
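# Usage note (an illustrative addition): with the `_LazyModule` indirection above,
# `from transformers.models.convnext import ConvNextModel` resolves the symbol lazily,
# so the torch-side module is only imported on first attribute access.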
| 76 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n in ascending order, e.g. 12 -> [2, 2, 3]."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
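
# Sanity checks (illustrative additions): 360 = 2**3 * 3**2 * 5, and a prime is its
# own single factor.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(13) == [13]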
| 11 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
A = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
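# Usage sketch (downloads a checkpoint named in the maps above; the exact token
# ids below are from bert-base-uncased and are illustrative, not guaranteed):
# tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
# tokenizer("hello world")["input_ids"]  # -> [101, 7592, 2088, 102]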
| 77 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
lowercase_ = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over one of the RFC 3526 MODP groups above."""
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''')
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32)), base=16)
    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]
    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]
    def is_valid_public_key(self, key: int) -> bool:
        # Check that the other party's public key is in range and in the subgroup
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )
    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('''Invalid public key''')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('''Invalid public key''')
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
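# Usage sketch of a full exchange between two parties in the default group:
# alice, bob = DiffieHellman(), DiffieHellman()
# alice_shared = alice.generate_shared_key(bob.generate_public_key())
# bob_shared = bob.generate_shared_key(alice.generate_public_key())
# assert alice_shared == bob_shared  # both sides derive the same SHA-256 digest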
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence for the given observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"""{var_name} must be a list"""
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"""{var_name} must be a list of strings"""
                raise ValueError(msg)
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"""{var_name} must be a dict"""
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"""{var_name} all keys must be strings"""
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg)
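# Worked example: the classic two-state "Healthy/Fever" HMM. For these inputs
# the most likely path is ["Healthy", "Healthy", "Fever"].
# observations = ["normal", "cold", "dizzy"]
# states = ["Healthy", "Fever"]
# start_p = {"Healthy": 0.6, "Fever": 0.4}
# trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
# emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
# viterbi(observations, states, start_p, trans_p, emit_p)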
if __name__ == "__main__":
from doctest import testmod
testmod()
| 78 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
lowercase_ = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, '''config.json''')) and os.path.isfile(
            os.path.join(dirpath, '''config.json''')):
            os.remove(os.path.join(dirpath, '''config.json'''))
        if os.path.exists(os.path.join(dirpath, '''pytorch_model.bin''')) and os.path.isfile(
            os.path.join(dirpath, '''pytorch_model.bin''')):
            os.remove(os.path.join(dirpath, '''pytorch_model.bin'''))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Print a 2D tensor (named print_2d_tensor upstream; kept to match the call sites below)."""
    logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data))
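# Quick sanity check for the entropy helper (natural-log entropy, so a uniform
# two-way distribution gives ln(2) ~= 0.6931):
# entropy(torch.tensor([0.5, 0.5]))  # -> tensor(0.6931)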
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    """Compute the head importance scores and, optionally, per-head attention entropies."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='''Iteration''', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''')
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info('''Head importance scores''')
        print_ad_tensor(head_importance)
    logger.info('''Head ranked by importance scores''')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''', original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''')
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print('''BREAK BY num_to_mask''')
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''', current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )
    logger.info('''Final head mask''')
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, '''head_mask.npy'''), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads and compare speed/score before and after pruning."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''', original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''', score_masking, score_pruning)
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''', original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=__A , type=__A , required=__A , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=__A , type=__A , required=__A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=__A , type=__A , required=__A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=__A , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=__A , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=__A , type=__A , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=__A , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''')
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''')
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''')
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''')
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=__A , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=__A , help='''Amount to heads to masking at each masking step.''')
parser.add_argument('''--metric_name''' , default='''acc''' , type=__A , help='''Metric to use for head masking.''')
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__A , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=__A , help='''Batch size.''')
parser.add_argument('''--seed''' , type=__A , default=42)
parser.add_argument('''--local_rank''' , type=__A , default=-1 , help='''local_rank for distributed training on gpus''')
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''')
parser.add_argument('''--server_ip''' , type=__A , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=__A , default='''''' , help='''Can be used for distant debugging.''')
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('''cuda''', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='''nccl''')  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, '''run_args.bin'''))
    logger.info('''Training/evaluation parameters %s''', args)
# Prepare dataset
    data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
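# Example invocation (script and data paths are placeholders, not from the source):
# python run_prune_gpt.py --model_name_or_path gpt2 --data_dir ./tokens.txt \
#     --output_dir ./pruned --try_masking --masking_threshold 0.9 --masking_amount 0.1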
| 11 | 0 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    '''Build and simulate a quantum full adder for two bits plus a carry-in.'''
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("""inputs must be integers.""")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("""inputs must be positive.""")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("""inputs must be exact integers.""")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("""inputs must be less or equal to 2.""")
    # build registers
    quantum_register = qiskit.QuantumRegister(4, """qr""")
    classical_register = qiskit.ClassicalRegister(2, """cr""")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(quantum_register, classical_register)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], classical_register)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("""aer_simulator""")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)
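# Reading the result: the two classical bits are (sum, carry_out). With the
# deterministic inputs 1, 1, 1 the total is 3 = 0b11, so every one of the 1000
# shots should collapse to the same string:
# quantum_full_adder(1, 1, 1)  # -> {'11': 1000}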
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 79 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of num must be multiplied together to reach a single digit."""
    if not isinstance(num, int):
        raise ValueError('''multiplicative_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Return how many times the digits of num must be summed together to reach a single digit."""
    if not isinstance(num, int):
        raise ValueError('''additive_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
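# Examples: 39 -> 27 -> 14 -> 4 takes three multiplicative steps, while
# 39 -> 12 -> 3 takes two additive steps.
assert multiplicative_persistence(39) == 3
assert additive_persistence(39) == 2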
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
'''simple docstring'''
for attribute in key.split(""".""" ):
__lowercase = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
__lowercase = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
__lowercase = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
__lowercase = value
elif weight_type == "weight_g":
__lowercase = value
elif weight_type == "weight_v":
__lowercase = value
elif weight_type == "bias":
__lowercase = value
else:
__lowercase = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
'''simple docstring'''
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
__lowercase = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(lowerCamelCase )[0].split(""".""" )[-2]
__lowercase = mapped_key.replace("""*""" , lowerCamelCase )
if "weight_g" in name:
__lowercase = """weight_g"""
elif "weight_v" in name:
__lowercase = """weight_v"""
elif "weight" in name:
__lowercase = """weight"""
elif "bias" in name:
__lowercase = """bias"""
else:
__lowercase = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
'''simple docstring'''
__lowercase = full_name.split("""conv_layers.""" )[-1]
__lowercase = name.split(""".""" )
__lowercase = int(items[0] )
__lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
__lowercase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
__lowercase = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
__lowercase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
__lowercase = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase )
def convert_config(model, is_finetuned):
'''simple docstring'''
__lowercase = SEWConfig()
if is_finetuned:
__lowercase = model.wav_encoder.wav_model.cfg
else:
__lowercase = model.cfg
__lowercase = fs_config.conv_bias
__lowercase = eval(fs_config.conv_feature_layers )
__lowercase = [x[0] for x in conv_layers]
__lowercase = [x[1] for x in conv_layers]
__lowercase = [x[2] for x in conv_layers]
__lowercase = """gelu"""
__lowercase = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
__lowercase = 0.0
__lowercase = fs_config.activation_fn.name
__lowercase = fs_config.encoder_embed_dim
__lowercase = 0.02
__lowercase = fs_config.encoder_ffn_embed_dim
__lowercase = 1e-5
__lowercase = fs_config.encoder_layerdrop
__lowercase = fs_config.encoder_attention_heads
__lowercase = fs_config.conv_pos_groups
__lowercase = fs_config.conv_pos
__lowercase = len(lowerCamelCase )
__lowercase = fs_config.encoder_layers
__lowercase = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__lowercase = model.cfg
__lowercase = fs_config.final_dropout
__lowercase = fs_config.layerdrop
__lowercase = fs_config.activation_dropout
__lowercase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__lowercase = fs_config.attention_dropout
__lowercase = fs_config.dropout_input
__lowercase = fs_config.dropout
__lowercase = fs_config.mask_channel_length
__lowercase = fs_config.mask_channel_prob
__lowercase = fs_config.mask_length
__lowercase = fs_config.mask_prob
__lowercase = """Wav2Vec2FeatureExtractor"""
__lowercase = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
'''simple docstring'''
if is_finetuned:
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__lowercase = SEWConfig.from_pretrained(lowerCamelCase )
else:
__lowercase = convert_config(model[0] , lowerCamelCase )
__lowercase = model[0].eval()
__lowercase = True if config.feat_extract_norm == """layer""" else False
    __lowercase = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCamelCase , return_attention_mask=lowerCamelCase , )
if is_finetuned:
if dict_path:
__lowercase = Dictionary.load(lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowercase = target_dict.pad_index
__lowercase = target_dict.bos_index
__lowercase = target_dict.pad_index
__lowercase = target_dict.bos_index
__lowercase = target_dict.eos_index
__lowercase = len(target_dict.symbols )
__lowercase = os.path.join(lowerCamelCase , """vocab.json""" )
if not os.path.isdir(lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase ) )
return
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , lowerCamelCase )
            __lowercase = Wav2Vec2CTCTokenizer(
lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase , )
            __lowercase = Wav2Vec2Processor(feature_extractor=lowerCamelCase , tokenizer=lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
__lowercase = SEWForCTC(lowerCamelCase )
else:
__lowercase = SEWModel(lowerCamelCase )
feature_extractor.save_pretrained(lowerCamelCase )
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
hf_model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
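# Example invocation (script name and paths are placeholders, not from the source):
# python convert_sew_checkpoint.py --checkpoint_path ./sew_checkpoint.pt \
#     --pytorch_dump_folder_path ./sew-hf --dict_path ./dict.ltr.txt --is_finetuned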
| 80 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ), )
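# Example: run just these image-processor tests (test path is a placeholder):
# pytest tests/models/dpt/test_image_processing_dpt.py -q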
| 11 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass
    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
@slow
def __snake_case ( self : Dict ) -> Dict:
# Use custom sequence because this tokenizer does not handle numbers.
__snake_case : str = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
__snake_case : List[Any] = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
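        # Note on the expected values above: the trailing 1s in "input_ids"
        # are pad tokens for the two shorter sequences, and they line up with
        # the 0s in "attention_mask" because the batch is padded to the
        # longest sequence.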
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=lowerCamelCase , )
| 81 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase_ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __A :
'''simple docstring'''
def __init__(self , A , A=16 , A=13 , A=7 , A=14 , A=10 , A=19 , A=5 , A=4 , A=True , A=16 , A=2 , A=4 , A=4 , A="gelu" , A=0.1 , A=0.1 , A=[1, 2, 3, 4, 5] , A=25 , A=5 , ) -> List[str]:
"""simple docstring"""
_a = d_model
_a = parent
_a = batch_size
_a = prediction_length
_a = context_length
_a = cardinality
_a = num_time_features
_a = lags_sequence
_a = embedding_dimension
_a = is_training
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = context_length
_a = prediction_length + label_length
_a = label_length
_a = moving_average
_a = autocorrelation_factor
def a__ (self ) -> Any:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def a__ (self , A ) -> List[Any]:
"""simple docstring"""
_a = config.context_length + max(config.lags_sequence )
_a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, _past_length] )
_a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_a = floats_tensor([self.batch_size, config.prediction_length] )
_a = {
'''past_values''': past_values,
'''static_categorical_features''': static_categorical_features,
'''past_time_features''': past_time_features,
'''past_observed_mask''': past_observed_mask,
'''future_time_features''': future_time_features,
'''future_values''': future_values,
}
return inputs_dict
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.get_config()
_a = self.prepare_autoformer_inputs_dict(A )
return config, inputs_dict
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ (self , A , A ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModel(config=A ).to(A ).eval()
_a = model(**A )
_a = outputs.encoder_last_hidden_state
_a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_encoder()
encoder.save_pretrained(A )
_a = AutoformerEncoder.from_pretrained(A ).to(A )
_a , _a , _a , _a , _a = model.create_network_inputs(**A )
_a , _a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_a = encoder(inputs_embeds=A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = model.get_decoder()
decoder.save_pretrained(A )
_a = AutoformerDecoder.from_pretrained(A ).to(A )
_a = decoder(
trend=A , inputs_embeds=A , encoder_hidden_states=A , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __A ( A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__lowerCamelCase : Optional[Any] = (AutoformerForPrediction,) if is_torch_available() else ()
__lowerCamelCase : Tuple = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = False
__lowerCamelCase : int = False
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : List[Any] = False
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = AutoformerModelTester(self )
_a = ConfigTester(self , config_class=A , has_text_modality=A )
def a__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_a = model_class(A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A )
_a , _a = model_class.from_pretrained(A , output_loading_info=A )
self.assertEqual(info['''missing_keys'''] , [] )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*A )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = inspect.signature(getattr(A , '''forward''' ) )
# The main input is the name of the argument after `self`
_a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(A )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = [
'''past_values''',
'''past_time_features''',
'''past_observed_mask''',
'''static_categorical_features''',
'''static_real_features''',
'''future_values''',
'''future_time_features''',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('''future_observed_mask''' )
expected_arg_names.extend(
[
'''decoder_attention_mask''',
'''head_mask''',
'''decoder_head_mask''',
'''cross_attn_head_mask''',
'''encoder_outputs''',
'''past_key_values''',
'''output_hidden_states''',
'''output_attentions''',
'''use_cache''',
'''return_dict''',
] )
self.assertListEqual(arg_names[: len(A )] , A )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , '''seq_length''' , A )
_a = getattr(self.model_tester , '''decoder_seq_length''' , A )
_a = getattr(self.model_tester , '''encoder_seq_length''' , A )
_a = getattr(self.model_tester , '''d_model''' , A )
_a = getattr(self.model_tester , '''num_attention_heads''' , A )
_a = d_model // num_attention_heads
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_a = len(A )
_a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(A , A )
# decoder attentions
_a = outputs.decoder_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_a = outputs.cross_attentions
self.assertIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
self.assertEqual(out_len + 2 , len(A ) )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase (__A="train-batch.pt"):
"""simple docstring"""
    file = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=__A , repo_type='''dataset''')
    batch = torch.load(file , map_location=torch_device)
    return batch
@require_torch
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch()
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
_a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model(
past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
_a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , A )
_a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=A )
self.assertTrue(torch.allclose(output[0, :3, :3] , A , atol=A ) )
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(A )
_a = prepare_batch('''val-batch.pt''' )
with torch.no_grad():
_a = model.generate(
static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
_a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , A )
_a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=A )
_a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , A , rtol=1E-1 ) )
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = int(number**0.5 )
return number == sq * sq
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
UpperCAmelCase_ = x_den * y_den * z_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
def a__ ( lowerCAmelCase__ = 35 ):
UpperCAmelCase_ = set()
UpperCAmelCase_ = 42
UpperCAmelCase_ = Fraction(0 )
UpperCAmelCase_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
UpperCAmelCase_ = x_num * y_den + x_den * y_num
UpperCAmelCase_ = x_den * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
UpperCAmelCase_ = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
UpperCAmelCase_ = x_num * y_num
UpperCAmelCase_ = x_den * y_num + x_num * y_den
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
UpperCAmelCase_ = x_num * x_num * y_num * y_num
UpperCAmelCase_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = int(sqrt(lowerCAmelCase__ ) )
UpperCAmelCase_ = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
UpperCAmelCase_ = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
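    # Illustrative sanity check (added for exposition, not part of the
    # original search): the n=1 branch is plain fraction addition, so with
    # x = 1/2, y = 1/3 and z = 0/1, add_three returns the reduced sum 5/6.
    assert add_three(1, 2, 1, 3, 0, 1) == (5, 6)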
| 82 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __A :
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = scope
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ (self ) -> Optional[int]:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def a__ (self , A , A , A , A , A , A , A ) -> Any:
"""simple docstring"""
_a = OpenLlamaModel(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A )
_a = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Any:
"""simple docstring"""
_a = True
_a = OpenLlamaModel(A )
model.to(A )
model.eval()
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , )
_a = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ (self , A , A , A , A , A , A , A , A , A , ) -> Dict:
"""simple docstring"""
_a = True
_a = True
_a = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
_a = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_a = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = torch.cat([input_mask, next_mask] , dim=-1 )
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
_a = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -3:, random_slice_idx].detach()
_a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __A ( A , A , A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowerCamelCase : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = OpenLlamaModelTester(self )
_a = ConfigTester(self , config_class=A , hidden_size=37 )
def a__ (self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> str:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a = type
self.model_tester.create_and_check_model(*A )
def a__ (self ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''single_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = 3
_a = '''multi_label_classification'''
_a = input_dict['''input_ids''']
_a = input_ids.ne(1 ).to(A )
_a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
_a = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def a__ (self , A ) -> Optional[int]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = ids_tensor([1, 10] , config.vocab_size )
_a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
_a = original_model(A ).last_hidden_state
_a = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_a = {'''type''': scaling_type, '''factor''': 10.0}
_a = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
_a = scaled_model(A ).last_hidden_state
_a = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
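

# A hedged, self-contained sketch (simplified for exposition; not the
# transformers implementation) of the two rotary-embedding scaling modes the
# parameterized test above compares. "linear" rescaling shrinks the inverse
# frequencies by the factor unconditionally, so even short inputs diverge
# from the unscaled model; "dynamic" (NTK-style) scaling only stretches the
# rotary base once the sequence outgrows the trained window, which is why
# the test expects short inputs to match in the dynamic branch.
def sketch_rotary_inv_freq(
    dim: int,
    seq_len: int,
    max_trained: int,
    factor: float,
    scaling_type: str,
    base: float = 10000.0,
) -> list:
    if scaling_type == "dynamic" and seq_len > max_trained:
        # NTK-style base stretch, applied only past the trained window
        base = base * ((factor * seq_len / max_trained) - (factor - 1)) ** (
            dim / (dim - 2)
        )
    inv_freq = [1.0 / (base ** (i / dim)) for i in range(0, dim, 2)]
    if scaling_type == "linear":
        # dividing position ids by `factor` is equivalent to shrinking inv_freq
        inv_freq = [f / factor for f in inv_freq]
    return inv_freq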
| 11 | 0 |
"""simple docstring"""
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Union[str, Any] = len(A_ )
for i in range(n - 1 ):
for j in range(i + 1, A_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
if len(A_ ) <= 1:
return arr, 0
_lowerCamelCase : int = len(A_ ) // 2
_lowerCamelCase : int = arr[0:mid]
_lowerCamelCase : int = arr[mid:]
_lowerCamelCase , _lowerCamelCase : str = count_inversions_recursive(A_ )
_lowerCamelCase , _lowerCamelCase : int = count_inversions_recursive(A_ )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = _count_cross_inversions(A_, A_ )
_lowerCamelCase : Optional[Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def snake_case_ ( A_ : int, A_ : Dict ):
'''simple docstring'''
_lowerCamelCase : Tuple = []
_lowerCamelCase : List[str] = 0
while i < len(A_ ) and j < len(A_ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(A_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(A_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def snake_case_ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCamelCase : List[str] = count_inversions_bf(A_ )
_lowerCamelCase , _lowerCamelCase : List[str] = count_inversions_recursive(A_ )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''', A_ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCamelCase : List[Any] = count_inversions_bf(A_ )
_lowerCamelCase , _lowerCamelCase : List[Any] = count_inversions_recursive(A_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''', A_ )
# an empty list should also have zero inversions
_lowerCamelCase : str = []
_lowerCamelCase : Optional[int] = count_inversions_bf(A_ )
_lowerCamelCase , _lowerCamelCase : Any = count_inversions_recursive(A_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''', A_ )
if __name__ == "__main__":
main()
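    # Quick illustrative check (added for exposition): [3, 1, 2] contains
    # exactly the two inversions (3, 1) and (3, 2), and both counters agree.
    assert count_inversions_bf([3, 1, 2]) == 2
    assert count_inversions_recursive([3, 1, 2])[1] == 2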
| 83 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __A ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=4 , ) -> List[str]:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_attention_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_choices
def a__ (self ) -> str:
"""simple docstring"""
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_attention_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
_a , _a , _a , _a = config_and_inputs
_a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = FlaxAlbertModelTester(self )
@slow
def a__ (self ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
_a = model_class_name.from_pretrained('''albert-base-v2''' )
_a = model(np.ones((1, 1) ) )
self.assertIsNotNone(A )
@require_flax
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ) -> Dict:
"""simple docstring"""
_a = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_a = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_a = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_a = model(A , attention_mask=A )[0]
_a = (1, 11, 768)
self.assertEqual(output.shape , A )
_a = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
| 11 | 0 |
def remove_digit(num: int) -> int:
    '''Return the largest number obtainable by deleting exactly one digit.
    (Name restored for readability; the anonymized original did not resolve.)'''
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int(''.join(transposition)) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__('doctest').testmod()
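    # Illustrative check (added for exposition): removing one digit of 152
    # yields 52, 12 and 15, so the maximum is 52.
    assert remove_digit(152) == 52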
| 84 |
'''Validate credit card numbers: a prefix check plus the Luhn checksum.'''


def validate_initial_digits(credit_card_number: str) -> bool:
    '''Return True if the number starts with a recognised network prefix.'''
    return credit_card_number.startswith(('34', '35', '37', '4', '5', '6'))


def luhn_validation(credit_card_number: str) -> bool:
    '''Return True if the number passes the Luhn checksum.'''
    cc_number = credit_card_number
    total = 0
    double_digit_start = len(cc_number) - 2
    for i in range(double_digit_start, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    '''Run all checks, print a diagnostic message and return the verdict.'''
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f'{error_message} of its length.')
        return False
    if not validate_initial_digits(credit_card_number):
        print(f'{error_message} of its first two digits.')
        return False
    if not luhn_validation(credit_card_number):
        print(f'{error_message} it fails the Luhn check.')
        return False
    print(f'{credit_card_number} is a valid credit card number.')
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 11 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class snake_case ( unittest.TestCase ):
def __lowercase( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(zip(a_ , range(len(a_ ) ) ) )
SCREAMING_SNAKE_CASE__ : Dict = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
SCREAMING_SNAKE_CASE__ : str = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(a_ ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(self.tmpdirname , a_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(a_ , a_ )
def __lowercase( self : Any , **a_ : Any )-> str:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Union[str, Any] , **a_ : Optional[int] )-> List[str]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Any , **a_ : List[str] )-> int:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **a_ )
def __lowercase( self : Tuple )-> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
        SCREAMING_SNAKE_CASE__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE__ : Dict = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase( self : List[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Any = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Optional[Any] = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=a_ )
SCREAMING_SNAKE_CASE__ : Dict = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Dict = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , a_ )
self.assertIsInstance(processor_fast.tokenizer , a_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , a_ )
self.assertIsInstance(processor_fast.image_processor , a_ )
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor(do_normalize=a_ , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ : str = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=a_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def __lowercase( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
SCREAMING_SNAKE_CASE__ : str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Dict = image_processor(a_ , return_tensors='np' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(images=a_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase( self : Tuple )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE__ : List[str] = processor(text=a_ )
SCREAMING_SNAKE_CASE__ : str = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase( self : Optional[Any] )-> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'lower newer'
SCREAMING_SNAKE_CASE__ : str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : List[str] = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def __lowercase( self : Any )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
SCREAMING_SNAKE_CASE__ : int = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : int = processor(images=a_ , visual_prompt=a_ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def __lowercase( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPSegProcessor(tokenizer=a_ , image_processor=a_ )
SCREAMING_SNAKE_CASE__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ : Dict = processor.batch_decode(a_ )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.batch_decode(a_ )
self.assertListEqual(a_ , a_ )
| 85 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 86 |
'''Scrape title, link, price, rating, MRP and discount for an Amazon.in
search term into a pandas DataFrame. (Names restored from the call sites;
the row-append follows the usual DataFrame.loc[len(index)] pattern.)'''
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = 'laptop') -> DataFrame:
    '''Collect the search results for ``product`` into a DataFrame.'''
    url = f'https://www.amazon.in/laptop/s?k={product}'
    header = {
        'User-Agent': (
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'
            ' (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'
        ),
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text, 'html.parser')
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            'Product Title',
            'Product Link',
            'Current Price of the product',
            'Product Rating',
            'MRP of the product',
            'Discount',
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            'div',
            attrs={'class': 's-result-item', 'data-component-type': 's-search-result'},
        ),
        soup.find_all('div', attrs={'class': 'a-row a-size-base a-color-base'}),
    ):
        try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span', attrs={'class': 'a-offscreen'}).text
            try:
                product_rating = item.find('span', attrs={'class': 'a-icon-alt'}).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span', attrs={'class': 'a-price a-text-price'}
                    ).text.split('₹')[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹').replace(',', ''))
                            - float(product_price.strip('₹').replace(',', ''))
                        )
                        / float(product_mrp.strip('₹').replace(',', ''))
                    )
                    * 100
                )
            except ValueError:
                discount = float('nan')
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # reset the price fields so a failed lookup on the next item does not
        # silently reuse stale values
        product_price = ' '
        product_mrp = ' '
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 11 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = (DDPMScheduler,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **UpperCAmelCase__ : Union[str, Any]) ->Any:
'''simple docstring'''
        config = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase__)
return config
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->str:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Dict:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Dict:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase__)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , sample_max_value=UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self : List[str]) ->str:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
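        # The reference values checked below follow the DDPM "fixed_small"
        # posterior variance beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
        # implied by the default config above.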
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[Any]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
A__ = len(UpperCAmelCase__)
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase__)):
# 1. predict noise residual
A__ = model(UpperCAmelCase__ , UpperCAmelCase__)
# 2. predict previous mean of sample x_t-1
A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A__ = pred_prev_sample
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 258.9606) < 1e-2
assert abs(result_mean.item() - 0.3372) < 1e-3
def SCREAMING_SNAKE_CASE ( self : int) ->int:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(prediction_type='''v_prediction''')
A__ = scheduler_class(**UpperCAmelCase__)
A__ = len(UpperCAmelCase__)
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase__)):
# 1. predict noise residual
A__ = model(UpperCAmelCase__ , UpperCAmelCase__)
# 2. predict previous mean of sample x_t-1
A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
A__ = pred_prev_sample
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 202.0296) < 1e-2
assert abs(result_mean.item() - 0.2631) < 1e-3
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
A__ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase__)
A__ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase__):
if i == len(UpperCAmelCase__) - 1:
A__ = -1
else:
A__ = timesteps[i + 1]
A__ = scheduler.previous_timestep(UpperCAmelCase__)
A__ = prev_t.item()
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
A__ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase__ , msg='''`custom_timesteps` must be in descending order.'''):
scheduler.set_timesteps(timesteps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
A__ = [100, 87, 50, 1, 0]
A__ = len(UpperCAmelCase__)
with self.assertRaises(UpperCAmelCase__ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase__ , timesteps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
A__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase__ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase__)
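# Standalone sketch (illustrative, not part of the test class above): where
# the hard-coded values asserted in the _get_variance test come from,
# assuming DDPM's "fixed_small" variance
# beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t) (Ho et al. 2020, eq. 7)
# with the config's linear beta schedule (0.0001 to 0.02 over 1_000 steps):
_betas = torch.linspace(0.0001, 0.02, 1_000)
_alphas_cumprod = torch.cumprod(1.0 - _betas, dim=0)
_t = 487
_variance = _betas[_t] * (1 - _alphas_cumprod[_t - 1]) / (1 - _alphas_cumprod[_t])
assert abs(_variance.item() - 0.00979) < 1e-5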
| 87 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
if isinstance(__A , torch.Tensor):
return image
elif isinstance(__A , PIL.Image.Image):
_a = [image]
if isinstance(image[0] , PIL.Image.Image):
_a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
_a = np.concatenate(__A , axis=0)
_a = np.array(__A).astype(np.floataa) / 2_55.0
_a = image.transpose(0 , 3 , 1 , 2)
_a = 2.0 * image - 1.0
_a = torch.from_numpy(__A)
elif isinstance(image[0] , torch.Tensor):
_a = torch.cat(__A , dim=0)
return image
def lowerCAmelCase (__A , __A , __A , __A=0.99_95):
"""simple docstring"""
if not isinstance(__A , np.ndarray):
_a = True
_a = va.device
_a = va.cpu().numpy()
_a = va.cpu().numpy()
_a = np.sum(va * va / (np.linalg.norm(__A) * np.linalg.norm(__A)))
if np.abs(__A) > DOT_THRESHOLD:
_a = (1 - t) * va + t * va
else:
_a = np.arccos(__A)
_a = np.sin(__A)
_a = theta_a * t
_a = np.sin(__A)
_a = np.sin(theta_a - theta_t) / sin_theta_a
_a = sin_theta_t / sin_theta_a
_a = sa * va + sa * va
if inputs_are_torch:
_a = torch.from_numpy(__A).to(__A)
return va
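# Descriptive note on the helper above (spherical linear interpolation,
# "slerp"): with theta = arccos(v0 . v1 / (|v0| |v1|)), it returns
# sin((1 - t) * theta) / sin(theta) * v0 + sin(t * theta) / sin(theta) * v1,
# which follows the great circle between the endpoints; when the inputs are
# nearly parallel (dot > DOT_THRESHOLD = 0.9995), it falls back to plain
# linear interpolation to avoid dividing by sin(theta) ~ 0.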
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = F.normalize(__A , dim=-1)
_a = F.normalize(__A , dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
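# Note on the loss above: for unit vectors x and y, |x - y| = 2 sin(theta / 2)
# with theta the angle between them, so (x - y).norm(dim=-1).div(2).arcsin()
# recovers theta / 2 and the returned value is 2 * (theta / 2) ** 2, a
# squared spherical (geodesic) distance; it is used for the CLIP guidance
# further below.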
def lowerCAmelCase (__A , __A):
"""simple docstring"""
for param in model.parameters():
_a = value
class __A ( A ):
'''simple docstring'''
def __init__(self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , )
_a = (
feature_extractor.size
if isinstance(feature_extractor.size , int )
else feature_extractor.size['''shortest_edge''']
)
_a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , A )
set_requires_grad(self.clip_model , A )
def a__ (self , A = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
self.enable_attention_slicing(A )
def a__ (self ) -> int:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self ) -> str:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self , A , A , A ) -> Optional[Any]:
"""simple docstring"""
_a = min(int(num_inference_steps * strength ) , A )
_a = max(num_inference_steps - init_timestep , 0 )
_a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def a__ (self , A , A , A , A , A , A=None ) -> List[str]:
"""simple docstring"""
if not isinstance(A , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(A )}''' )
_a = image.to(device=A , dtype=A )
if isinstance(A , list ):
_a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
_a = torch.cat(A , dim=0 )
else:
_a = self.vae.encode(A ).latent_dist.sample(A )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 0.18215 * init_latents
_a = init_latents.repeat_interleave(A , dim=0 )
_a = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A )
# get latents
_a = self.scheduler.add_noise(A , A , A )
_a = init_latents
return latents
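# Note on prepare_latents above: this is the standard img2img
# initialization: VAE-encode the image, scale by 0.18215, repeat once per
# generation, then diffuse forward to the chosen timestep with
# scheduler.add_noise, so denoising starts from a noised version of the
# input rather than from pure noise.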
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def a__ (self , A , A ) -> List[Any]:
"""simple docstring"""
_a = self.feature_extractor.preprocess(A )
_a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = image_embeddings_clip.repeat_interleave(A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def a__ (self , A , A , A , A , A , A , A , ) -> Union[str, Any]:
"""simple docstring"""
_a = latents.detach().requires_grad_()
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_a = self.scheduler.alphas_cumprod[timestep]
_a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_a = torch.sqrt(A )
_a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , LMSDiscreteScheduler ):
_a = self.scheduler.sigmas[index]
_a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 1 / 0.18215 * sample
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = transforms.Resize(self.feature_extractor_size )(A )
_a = self.normalize(A ).to(latents.dtype )
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = spherical_dist_loss(A , A ).mean() * clip_guidance_scale
_a = -torch.autograd.grad(A , A )[0]
if isinstance(self.scheduler , LMSDiscreteScheduler ):
_a = latents.detach() + grads * (sigma**2)
_a = noise_pred_original
else:
_a = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
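# Note on cond_fn above: the gradient of the CLIP distance with respect to
# the latents is folded back into the noise prediction as
# eps' = eps - sqrt(1 - alphabar_t) * grad (or, for sigma-based schedulers,
# added directly to the latents scaled by sigma**2), following the
# classifier-guidance update of Dhariwal & Nichol (2021).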
@torch.no_grad()
def __call__(self , A , A , A = None , A = None , A = 512 , A = 512 , A = 0.6 , A = 50 , A = 7.5 , A = 1 , A = 0.0 , A = 100 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> str:
"""simple docstring"""
if isinstance(A , list ) and len(A ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
# get prompt text embeddings for content and style
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(A , A , self.device )
_a = timesteps[:1].repeat(A )
# Preprocess image
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = slerp(A , A , A )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(A , A )
_a = self.get_clip_image_embeddings(A , A )
_a = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_a = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 1 / 0.18215 * latents
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 11 | 0 |
"""simple docstring"""
def _snake_case ( __snake_case : list ):
"""simple docstring"""
if any(not isinstance(x , int ) or x < 0 for x in sequence ):
raise TypeError("""Sequence must be list of non-negative integers""" )
for _ in range(len(__snake_case ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
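# Additional illustrative checks: each pass lets "beads" fall one level, so
# duplicates and already-sorted inputs are handled, while non-integers and
# negatives raise the TypeError above.
assert bead_sort([2, 2, 1]) == [1, 2, 2]
assert bead_sort([1, 2, 3]) == [1, 2, 3]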
| 88 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( A , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = CTRLTokenizer
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Any = False
def a__ (self ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_a = dict(zip(A , range(len(A ) ) ) )
_a = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_a = {'''unk_token''': '''<unk>'''}
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
def a__ (self , **A ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **A )
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = '''adapt react readapt apt'''
_a = '''adapt react readapt apt'''
return input_text, output_text
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a = '''adapt react readapt apt'''
_a = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_a = tokenizer.tokenize(A )
self.assertListEqual(A , A )
_a = tokens + [tokenizer.unk_token]
_a = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
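# Note on the expected tokenization above: with the toy merges file,
# "adapt" is fully covered by the "a d" -> "ad", "a p" -> "ap",
# "ap t</w>" -> "apt</w>" and "ad apt</w>" chain, so it survives as a single
# token, while "react" only matches the "r e" merge and splits into
# "re@@ a@@ c@@ t", with "@@" marking non-final subword pieces.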
| 11 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class _lowerCamelCase( _a ):
lowercase_ : Dict = """blip_2_vision_model"""
def __init__( self, lowerCamelCase=14_08, lowerCamelCase=61_44, lowerCamelCase=39, lowerCamelCase=16, lowerCamelCase=2_24, lowerCamelCase=14, lowerCamelCase="gelu", lowerCamelCase=0.0_0_0_0_1, lowerCamelCase=0.0, lowerCamelCase=1E-10, lowerCamelCase=True, **lowerCamelCase, ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase)
_lowercase : Optional[int] = hidden_size
_lowercase : Dict = intermediate_size
_lowercase : Dict = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : Tuple = patch_size
_lowercase : str = image_size
_lowercase : Optional[int] = initializer_range
_lowercase : Any = attention_dropout
_lowercase : List[str] = layer_norm_eps
_lowercase : Dict = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def UpperCamelCase ( cls, lowerCamelCase, **lowerCamelCase) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCamelCase)
_lowercase , _lowercase : str = cls.get_config_dict(lowerCamelCase, **lowerCamelCase)
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type') == "blip-2":
_lowercase : Tuple = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCamelCase, **lowerCamelCase)
class _lowerCamelCase( _a ):
lowercase_ : Union[str, Any] = """blip_2_qformer"""
def __init__( self, lowerCamelCase=3_05_22, lowerCamelCase=7_68, lowerCamelCase=12, lowerCamelCase=12, lowerCamelCase=30_72, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=0.0_2, lowerCamelCase=1E-12, lowerCamelCase=0, lowerCamelCase="absolute", lowerCamelCase=2, lowerCamelCase=14_08, **lowerCamelCase, ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase, **lowerCamelCase)
_lowercase : Union[str, Any] = vocab_size
_lowercase : Dict = hidden_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Optional[Any] = num_attention_heads
_lowercase : str = hidden_act
_lowercase : Optional[Any] = intermediate_size
_lowercase : int = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : int = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Union[str, Any] = position_embedding_type
_lowercase : str = cross_attention_frequency
_lowercase : List[str] = encoder_hidden_size
@classmethod
def UpperCamelCase ( cls, lowerCamelCase, **lowerCamelCase) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCamelCase)
_lowercase , _lowercase : List[str] = cls.get_config_dict(lowerCamelCase, **lowerCamelCase)
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type') == "blip-2":
_lowercase : str = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCamelCase, **lowerCamelCase)
class _lowerCamelCase( _a ):
lowercase_ : Dict = """blip-2"""
lowercase_ : Optional[int] = True
def __init__( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=32, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCamelCase)
if vision_config is None:
_lowercase : int = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')
if qformer_config is None:
_lowercase : Any = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')
if text_config is None:
_lowercase : Tuple = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
_lowercase : Dict = BlipaVisionConfig(**lowerCamelCase)
_lowercase : List[Any] = BlipaQFormerConfig(**lowerCamelCase)
_lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowercase : List[Any] = CONFIG_MAPPING[text_model_type](**lowerCamelCase)
_lowercase : Optional[int] = self.text_config.tie_word_embeddings
_lowercase : Dict = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : List[str] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Optional[int] = 1.0
_lowercase : Union[str, Any] = 0.0_2
@classmethod
def UpperCamelCase ( cls, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase, ) -> Union[str, Any]:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **lowerCamelCase, )
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = copy.deepcopy(self.__dict__)
_lowercase : int = self.vision_config.to_dict()
_lowercase : Optional[int] = self.qformer_config.to_dict()
_lowercase : Union[str, Any] = self.text_config.to_dict()
_lowercase : List[str] = self.__class__.model_type
return output
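# Note on the defaults above: the Q-Former's encoder_hidden_size (1408)
# matches the vision encoder's hidden_size, since the Q-Former
# cross-attends (once every cross_attention_frequency layers) to the frozen
# image features; the composite config copies
# self.vision_config.hidden_size for the same reason.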
| 89 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase_ = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def lowerCAmelCase (__A):
"""simple docstring"""
_a = list(s_dict.keys())
for key in keys:
_a = r'''.*/layers_(\d+)'''
_a = key
if re.match(__A , __A):
_a = re.sub(r'''layers_(\d+)''' , r'''block/\1/layer''' , __A)
_a = r'''(encoder|decoder)\/'''
if re.match(__A , __A):
_a = re.match(__A , __A).groups()
if groups[0] == "encoder":
_a = re.sub(r'''/mlp/''' , r'''/1/mlp/''' , __A)
_a = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/1/layer_norm/''' , __A)
elif groups[0] == "decoder":
_a = re.sub(r'''/mlp/''' , r'''/2/mlp/''' , __A)
_a = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/2/layer_norm/''' , __A)
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_a = new_key.replace(__A , __A)
print(F'''{key} -> {new_key}''')
_a = s_dict.pop(__A)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys()):
if "expert" in key:
_a = s_dict[key].shape[0]
_a = s_dict[key]
for idx in range(__A):
_a = expert_weihts[idx]
print(F'''{key} -> {key.replace('expert/' , F"experts/expert_{idx}/")}''')
s_dict.pop(__A)
return s_dict
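# Note on the experts loop above: a stacked T5X expert tensor of shape
# (num_experts, ...) is split into one state-dict entry per expert, each
# rekeyed from "expert/" to f"experts/expert_{idx}/", and the original
# stacked key is popped afterwards.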
lowercase_ = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def lowerCAmelCase (__A , __A):
"""simple docstring"""
import regex as re
with open(__A , '''r''') as f:
_a = f.read()
_a = re.findall(r'''(.*) = ([0-9.]*)''' , __A)
_a = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_a = float(__A) if '''.''' in value else int(__A)
_a = re.findall(r'''(.*activations) = \(\'(.*)\',\)''' , __A)[0]
_a = str(activation[1])
_a = num_experts
_a = SwitchTransformersConfig(**__A)
return config
def lowerCAmelCase (__A , __A , __A=None , __A="./" , __A=8):
"""simple docstring"""
print(F'''Loading flax weights from : {flax_checkpoint_path}''')
_a = checkpoints.load_tax_checkpoint(__A)
if gin_file is not None:
_a = convert_gin_to_config(__A , __A)
else:
_a = SwitchTransformersConfig.from_pretrained(__A)
_a = SwitchTransformersForConditionalGeneration(__A)
_a = flax_params['''target''']
_a = flatten_dict(__A , sep='''/''')
_a = rename_keys(__A)
_a = unflatten_dict(__A , sep='''/''')
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(__A , __A)
print(F'''Save PyTorch model to {pytorch_dump_path}''')
pt_model.save_pretrained(__A)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 11 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class a__ ( list ):
'''simple docstring'''
def __lt__( self , lowerCamelCase_ ) -> List[Any]:
return self[-1] < other[-1]
def __eq__( self , lowerCamelCase_ ) -> str:
return self[-1] == other[-1]
def _snake_case ( A ) -> list:
lowerCAmelCase__ = []
# sort into stacks
for element in collection:
lowerCAmelCase__ = Stack([element] )
lowerCAmelCase__ = bisect_left(A , A )
if i != len(A ):
stacks[i].append(A )
else:
stacks.append(A )
# use a heap-based merge to merge stack efficiently
lowerCAmelCase__ = merge(*(reversed(A ) for stack in stacks) )
return collection
if __name__ == "__main__":
__UpperCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 90 |
'''simple docstring'''
def lowerCAmelCase (__A , __A):
"""simple docstring"""
if digit_amount > 0:
return round(number - int(__A) , __A)
return number - int(__A)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
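# Worked example for the calls above: decimal_isolate(35.345, 1) evaluates
# round(35.345 - 35, 1) == 0.3, while a non-positive digit_amount returns
# the raw fractional part unrounded (only approximately 0.345, because of
# binary floating-point representation).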
| 11 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Optional[Any] ,**A_ : Union[str, Any] ) -> str:
super().__init__(**A_ )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self : Dict ,A_ : Union[np.ndarray, bytes, str] ,**A_ : List[Any] ) -> List[str]:
return super().__call__(A_ ,**A_ )
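# Hypothetical usage sketch (the checkpoint name is illustrative;
# "laion/clap-htsat-unfused" is a CLAP model commonly used for this task):
# classifier = pipeline(task="zero-shot-audio-classification",
#                       model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
# returns a list of {"score": ..., "label": ...} dicts sorted by falling score.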
def _SCREAMING_SNAKE_CASE ( self : int ,**A_ : List[str] ) -> Any:
A = {}
if "candidate_labels" in kwargs:
A = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
A = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[str] ,A_ : Tuple=None ,A_ : Tuple="This is a sound of {}." ) -> Union[str, Any]:
if isinstance(A_ ,str ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
A = requests.get(A_ ).content
else:
with open(A_ ,'rb' ) as f:
A = f.read()
if isinstance(A_ ,bytes ):
A = ffmpeg_read(A_ ,self.feature_extractor.sampling_rate )
if not isinstance(A_ ,np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
A = self.feature_extractor(
[audio] ,sampling_rate=self.feature_extractor.sampling_rate ,return_tensors='pt' )
A = candidate_labels
A = [hypothesis_template.format(A_ ) for x in candidate_labels]
A = self.tokenizer(A_ ,return_tensors=self.framework ,padding=A_ )
A = [text_inputs]
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ) -> Tuple:
A = model_inputs.pop('candidate_labels' )
A = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] ,UserDict ):
A = text_inputs[0]
else:
# Batching case.
A = text_inputs[0][0]
A = self.model(**A_ ,**A_ )
A = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ) -> int:
A = model_outputs.pop('candidate_labels' )
A = model_outputs['logits'][0]
if self.framework == "pt":
A = logits.softmax(dim=0 )
A = probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
A = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(A_ ,A_ ) ,key=lambda A_ : -x[0] )
]
return result
| 91 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase_ = 10
lowercase_ = 256
def lowerCAmelCase (__A):
"""simple docstring"""
if len(__A) < MIN_NUM_TOKENS:
return None
_a = MinHash(num_perm=__A)
for token in set(__A):
min_hash.update(token.encode())
return min_hash
def lowerCAmelCase (__A):
"""simple docstring"""
return {t for t in NON_ALPHA.split(__A) if len(t.strip()) > 0}
class __A :
'''simple docstring'''
def __init__(self , *,
A = 0.85 , ) -> Optional[int]:
"""simple docstring"""
_a = duplication_jaccard_threshold
_a = NUM_PERM
_a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_a = defaultdict(A )
def a__ (self , A , A ) -> None:
"""simple docstring"""
_a = self._index.query(A )
if code_key in self._index.keys:
print(f'''Duplicate key {code_key}''' )
return
self._index.insert(A , A )
if len(A ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(A )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(A )
def a__ (self ) -> List[List[Dict]]:
"""simple docstring"""
_a = []
for base, duplicates in self._duplicate_clusters.items():
_a = [base] + list(A )
# reformat the cluster to be a list of dict
_a = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(A )
return duplicate_clusters
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_duplicate_clusters()
with open(A , '''w''' ) as f:
json.dump(A , A )
def lowerCAmelCase (__A):
"""simple docstring"""
_a , _a = element
_a = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCAmelCase (__A):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(__A , max_queue_size=10_000) , chunksize=100 , ):
if data is not None:
yield data
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = DuplicationIndex(duplication_jaccard_threshold=__A)
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(__A)) , max_queue_size=100)):
di.add(__A , __A)
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = get_tokens(__A)
_a = get_tokens(__A)
return len(tokensa & tokensa) / len(tokensa | tokensa)
lowercase_ = None
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = []
for elementa in cluster:
_a = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_a = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(__A , __A) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_a = 1
extremes.append(__A)
return extremes
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
global _shared_dataset
_a = dataset
_a = []
_a = partial(_find_cluster_extremes_shared , jaccard_threshold=__A)
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
__A , __A , ) , total=len(__A) , ):
extremes_list.append(__A)
return extremes_list
def lowerCAmelCase (__A , __A = 0.85):
"""simple docstring"""
_a = make_duplicate_clusters(__A , __A)
_a = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
_a = {}
_a = find_extremes(__A , __A , __A)
for extremes in extremes_clusters:
for element in extremes:
_a = element
_a = duplicate_indices - set(extreme_dict.keys())
_a = dataset.filter(lambda __A , __A: idx not in remove_indices , with_indices=__A)
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_a = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
_a = extreme_dict[element['''base_index''']]['''copies''']
print(F'''Original dataset size: {len(__A)}''')
print(F'''Number of duplicate clusters: {len(__A)}''')
print(F'''Files in duplicate cluster: {len(__A)}''')
print(F'''Unique files in duplicate cluster: {len(__A)}''')
print(F'''Filtered dataset size: {len(__A)}''')
return ds_filter, duplicate_clusters
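# Standalone sketch (illustrative only; datasketch's MinHash is imported at
# the top of this file): a 256-permutation sketch (NUM_PERM above)
# approximates the exact Jaccard similarity that find_extremes re-computes
# when selecting cluster extremes.
_mh_a = MinHash(num_perm=256)
_mh_b = MinHash(num_perm=256)
for _tok in [f"token_{_i}" for _i in range(12)]:
    _mh_a.update(_tok.encode())
for _tok in [f"token_{_i}" for _i in range(10)] + ["extra_1", "extra_2"]:
    _mh_b.update(_tok.encode())
# exact Jaccard is 10 / 14 ~ 0.714 and the sketch estimate lands close to it:
assert abs(_mh_a.jaccard(_mh_b) - 10 / 14) < 0.2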
| 11 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = 'longformer'
def __init__( self : List[str] , UpperCAmelCase__ : Union[List[int], int] = 512 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 30522 , UpperCAmelCase__ : int = 768 , UpperCAmelCase__ : int = 12 , UpperCAmelCase__ : int = 12 , UpperCAmelCase__ : int = 3072 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : float = 0.1 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : float = 1E-12 , UpperCAmelCase__ : bool = False , **UpperCAmelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase : Dict =attention_window
lowercase : Any =sep_token_id
lowercase : List[Any] =bos_token_id
lowercase : Optional[int] =eos_token_id
lowercase : List[str] =vocab_size
lowercase : Any =hidden_size
lowercase : Optional[int] =num_hidden_layers
lowercase : Optional[Any] =num_attention_heads
lowercase : List[Any] =hidden_act
lowercase : Optional[int] =intermediate_size
lowercase : str =hidden_dropout_prob
lowercase : str =attention_probs_dropout_prob
lowercase : Tuple =max_position_embeddings
lowercase : List[Any] =type_vocab_size
lowercase : Optional[Any] =initializer_range
lowercase : int =layer_norm_eps
lowercase : List[Any] =onnx_export
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def __init__( self : Any , UpperCAmelCase__ : "PretrainedConfig" , UpperCAmelCase__ : str = "default" , UpperCAmelCase__ : "List[PatchingSpec]" = None ):
'''simple docstring'''
super().__init__(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowercase : Tuple =True
@property
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase : Optional[Any] ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Union[str, Any] ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Any =super().outputs
if self.task == "default":
lowercase : List[Any] ={0: '''batch'''}
return outputs
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 1E-4
@property
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def lowerCamelCase_ ( self : str , UpperCAmelCase__ : "PreTrainedTokenizerBase" , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[TensorType] = None , ):
'''simple docstring'''
lowercase : Optional[int] =super().generate_dummy_inputs(
preprocessor=UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowercase : Any =torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
lowercase : List[str] =1
return inputs
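# Note on generate_dummy_inputs above: Longformer distinguishes local from
# global attention, so the exported ONNX graph also needs a
# global_attention_mask; zeros mean local-only attention, and setting every
# second position to 1 makes sure both attention code paths get traced.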
| 92 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A ( nn.Module ):
'''simple docstring'''
def __init__(self ) -> Dict:
"""simple docstring"""
super().__init__()
_a = nn.Linear(3 , 4 )
_a = nn.BatchNormad(4 )
_a = nn.Linear(4 , 5 )
def a__ (self , A ) -> Dict:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(A ) ) )
class __A ( A ):
'''simple docstring'''
def a__ (self , A , *A , **A ) -> Optional[Any]:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class __A ( A ):
'''simple docstring'''
def a__ (self , A , A ) -> int:
"""simple docstring"""
return output + 1
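# Note on the two hooks above (pre_forward / post_forward in accelerate's
# ModelHook API): add_hook_to_module wraps module.forward so the pre-hook
# can rewrite (args, kwargs) before the real forward runs and the post-hook
# can rewrite the returned output, which is what the "+ 1" shifts asserted
# in the tests below exercise.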
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
self.assertEqual(test_model._hf_hook , A )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
add_hook_to_module(A , A , append=A )
self.assertEqual(isinstance(test_model._hf_hook , SequentialHook ) , True )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(x + 1 )
_a = test_model(x + 2 )
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , A , atol=1E-5 )
def a__ (self ) -> str:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , output + 2 , atol=1E-5 )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a = True
_a = test_model(A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(A , AlignDevicesHook(io_same_device=A ) )
_a = torch.randn(2 , 3 ).to(0 )
_a = model(A )
self.assertEqual(output.device , torch.device(0 ) )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
_a = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(A , execution_device=A , offload=A )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(A , execution_device=A , offload=A , offload_buffers=A )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() , offload_buffers=A , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 11 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__A = False
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_image_variations( self ):
        '''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 93 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = IFInpaintingSuperResolutionPipeline
__lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self ) -> List[Any]:
        """simple docstring"""
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self , device , seed=0 ) -> List[Any]:
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components(self ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16(self ) -> str:
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass(self ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local(self ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
    def test_inference_batch_single_identical(self ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 11 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ReformerTokenizer
UpperCamelCase_ = ReformerTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = True
    def setUp( self : Dict ) -> Tuple:
'''simple docstring'''
super().setUp()
        tokenizer = ReformerTokenizer(SCREAMING_SNAKE_CASE , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self : Optional[int] ) -> Optional[Any]:
        '''simple docstring'''
        token = '''<s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self : Optional[Any] ) -> Any:
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(vocab_keys ) , 1000 )
    def test_vocab_size( self : Tuple ) -> str:
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers( self : List[str] ) -> Tuple:
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_padding( self : Tuple , max_length : int=15 ) -> Optional[int]:
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='''max_length''' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='''max_length''' , )
def A__ ( self : str ) -> int:
'''simple docstring'''
pass
    def test_full_tokenizer( self : List[Any] ) -> Optional[Any]:
        '''simple docstring'''
        tokenizer = ReformerTokenizer(SCREAMING_SNAKE_CASE , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer( self : str ) -> Optional[int]:
'''simple docstring'''
return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )
@slow
    def test_tokenization_base_easy_symbols( self : str ) -> List[Any]:
        '''simple docstring'''
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self : Optional[Any] ) -> Tuple:
        '''simple docstring'''
        symbols = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self : Optional[Any] ) -> List[Any]:
        '''simple docstring'''
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = ''' '''.join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors='''pt''' )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence['''input_ids'''].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration( self : int ) -> Dict:
        '''simple docstring'''
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=False , sequences=sequences , )
| 94 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester :
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ) -> Tuple:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self ) -> Optional[int]:
        """simple docstring"""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self ) -> str:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ) -> List[Any]:
        """simple docstring"""
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self ) -> Dict:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            '''states''': states,
            '''actions''': actions,
            '''rewards''': rewards,
            '''returns_to_go''': returns_to_go,
            '''timesteps''': timesteps,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_torch
class __A ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
__lowerCamelCase : List[str] = ()
__lowerCamelCase : Tuple = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__lowerCamelCase : str = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : str = False
__lowerCamelCase : Dict = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = False
__lowerCamelCase : List[str] = False
    def setUp(self ) -> Optional[int]:
        """simple docstring"""
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=37 )
    def test_config(self ) -> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self ) -> List[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ) -> Optional[Any]:
        """simple docstring"""
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature(self ) -> Union[str, Any]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                '''states''',
                '''actions''',
                '''rewards''',
                '''returns_to_go''',
                '''timesteps''',
                '''attention_mask''',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_autoregressive_prediction(self ) -> Optional[Any]:
        """simple docstring"""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ) # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                state_pred , action_pred , return_pred = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
            state , reward , _ , _ = ( # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
| 11 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ (TokenizerTesterMixin , unittest.TestCase ):
__magic_name__ = LongformerTokenizer
__magic_name__ = True
__magic_name__ = LongformerTokenizerFast
__magic_name__ = True
    def setUp( self : Any ) -> Any:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase_ ) )
    def get_tokenizer( self : Any , **kwargs ) -> List[str]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self : Optional[int] , **kwargs ) -> List[str]:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : Tuple , tokenizer : Dict ) -> int:
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self : Dict ) -> List[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text ) # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def longformer_dict_integration_testing( self : List[Any] ) -> Optional[Any]:
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=False ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=False ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
    @slow
    def test_sequence_builders( self : Optional[int] ) -> Optional[Any]:
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self : str ) -> str:
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )
        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask , lstrip=True , rstrip=False )} ) # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
    def test_pretokenized_inputs( self : List[str] ) -> List[str]:
pass
    def test_embeded_special_tokens( self : Dict ) -> Union[str, Any]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    tokens_r_str , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
    def test_change_add_prefix_space_and_trim_offsets_args( self : Tuple ) -> Dict:
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , add_prefix_space )
            self.assertEqual(post_processor_state["add_prefix_space"] , add_prefix_space )
            self.assertEqual(post_processor_state["trim_offsets"] , trim_offsets )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments( self : List[str] ) -> int:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                text_of_1_token = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"""{text_of_1_token} {text_of_1_token}"""
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = f""" {text}"""
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
| 95 |
'''simple docstring'''
from __future__ import annotations
def all_unique(collection):
    """simple docstring"""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
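# Illustrative checks (added for clarity; not part of the original file). The
# helper returns True only when every element of the input is distinct:
# >>> all_unique([1, 2, 3])
# True
# >>> all_unique([1, 2, 2])
# False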
| 11 | 0 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer :
    def put( self , value ):
        raise NotImplementedError()
    def end( self ):
        raise NotImplementedError()
class TextStreamer ( BaseStreamer ):
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , **decode_kwargs ) -> None:
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put( self , value ) -> None:
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""" )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n""" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(""" """ ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end( self ) -> None:
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = """"""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text( self , text : str , stream_end : bool = False ) -> None:
        print(text , flush=True , end="""""" if not stream_end else None )
    def _is_chinese_char( self , cp ) -> bool:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
class TextIteratorStreamer ( TextStreamer ):
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , timeout : Optional[float] = None , **decode_kwargs ) -> None:
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text( self , text : str , stream_end : bool = False ) -> None:
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
    def __iter__( self ):
        return self
    def __next__( self ):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
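# Minimal usage sketch (added for illustration; assumes a small causal-LM
# checkpoint such as "gpt2" and the standard `generate(..., streamer=...)` API):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#     streamer = TextStreamer(tok, skip_prompt=True)
#     # Decoded text is printed word by word as tokens are generated.
#     model.generate(**inputs, streamer=streamer, max_new_tokens=20)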
| 96 |
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list , item):
    """simple docstring"""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] , item)
    else:
        return binary_search(a_list[midpoint + 1 :] , item)
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by comma:\n").strip()
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
lowercase_ = int(input("Enter the number to be found in the list:\n").strip())
lowercase_ = "" if binary_search(sequence, target) else "not "
print(F"""{target} was {not_str}found in {sequence}""")
| 11 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """simple docstring"""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy( self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
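# Illustrative round trip (added; field values are arbitrary): `copy` deep-copies
# every field into a fresh instance, so mutating the copy leaves the original intact.
# >>> cfg = DownloadConfig(num_proc=4, max_retries=3)
# >>> clone = cfg.copy()
# >>> clone.num_proc == 4 and clone is not cfg
# True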
| 97 |
'''simple docstring'''
class PrefixSum :
    '''simple docstring'''
    def __init__(self , array ) -> None:
        """simple docstring"""
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self , start , end ) -> int:
        """simple docstring"""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self , target_sum ) -> bool:
        """simple docstring"""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
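# Illustrative usage (added; not in the original file):
# >>> ps = PrefixSum([1, 2, 3, 4])
# >>> ps.get_sum(0, 3)    # 1 + 2 + 3 + 4
# 10
# >>> ps.get_sum(1, 2)    # 2 + 3
# 5
# >>> ps.contains_sum(5)  # the subarray [2, 3] sums to 5
# True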
| 11 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map( dataset : datasets.Dataset, **kwargs ):
    """simple docstring"""
    _ = dataset.map(**kwargs )
@get_duration
def filter( dataset : datasets.Dataset, **kwargs ):
    """simple docstring"""
    _ = dataset.filter(**kwargs )
def benchmark_map_filter( ) -> Any:
    """simple docstring"""
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow''' ), features, num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset, batched=True )
        times['''map no-op batched'''] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type='''torch''', columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset, function=lambda x : None, batched=True )
        with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset, function=lambda x : None, batched=True )
        times['''map fast-tokenizer batched'''] = map(dataset, function=tokenize, batched=True )
        times['''filter'''] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, '''wb''' ) as f:
            f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
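# Note: `get_duration` comes from the local `utils` module, which is not shown
# here. A minimal sketch of what such a decorator presumably looks like (an
# assumption for illustration, not the shipped implementation):
#
#     import functools, time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start  # elapsed seconds, stored in `times`
#         return wrapper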
| 98 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n):
    """simple docstring"""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
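# Illustrative checks (added; not in the original file): factors are returned in
# non-decreasing order and multiply back to the input.
# >>> prime_factors(360)
# [2, 2, 2, 3, 3, 5]
# >>> prime_factors(97)
# [97]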
| 11 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = """vivit"""
    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , qkv_bias=True , **kwargs , ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
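# Illustrative instantiation (added; not part of the original module). The
# defaults mirror the google/vivit-b-16x2-kinetics400 checkpoint:
# >>> config = VivitConfig()
# >>> (config.image_size, config.num_frames, config.tubelet_size)
# (224, 32, [2, 16, 16])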
| 99 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman :
    '''simple docstring'''
    def __init__(self , group = 14 ) -> None:
        """simple docstring"""
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key(self ) -> str:
        """simple docstring"""
        return hex(self.__private_key )[2:]
    def generate_public_key(self ) -> str:
        """simple docstring"""
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key(self , key ) -> bool:
        """simple docstring"""
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key(self , other_key_str ) -> str:
        """simple docstring"""
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str , prime ) -> bool:
        """simple docstring"""
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str , remote_public_key_str , group = 14 ) -> str:
        """simple docstring"""
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
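# Illustrative key exchange (added; not part of the original file): both parties
# derive the same SHA-256 shared secret from each other's public keys.
# >>> alice, bob = DiffieHellman(), DiffieHellman()
# >>> shared_a = alice.generate_shared_key(bob.generate_public_key())
# >>> shared_b = bob.generate_shared_key(alice.generate_public_key())
# >>> shared_a == shared_b
# True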
| 11 | 0 |
def one_pence( ) -> int:
    return 1
def two_pence( x ) -> int:
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence( x ) -> int:
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence( x ) -> int:
    return 0 if x < 0 else ten_pence(x - 1_0 ) + five_pence(x )
def twenty_pence( x ) -> int:
    return 0 if x < 0 else twenty_pence(x - 2_0 ) + ten_pence(x )
def fifty_pence( x ) -> int:
    return 0 if x < 0 else fifty_pence(x - 5_0 ) + twenty_pence(x )
def one_pound( x ) -> int:
    return 0 if x < 0 else one_pound(x - 1_0_0 ) + fifty_pence(x )
def two_pound( x ) -> int:
    return 0 if x < 0 else two_pound(x - 2_0_0 ) + one_pound(x )
def solution( x = 2_0_0 ) -> int:
    return two_pound(x )
if __name__ == "__main__":
print(solution(int(input().strip())))
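# For reference (added): this is the classic Project Euler #31 coin-sum problem;
# solution(200) counts the ways to make £2 from the eight coin values above and
# returns 73682.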
| 100 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model , dirpath):
    """simple docstring"""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath , '''config.json''')) and os.path.isfile(
            os.path.join(dirpath , '''config.json''')):
            os.remove(os.path.join(dirpath , '''config.json'''))
        if os.path.exists(os.path.join(dirpath , '''pytorch_model.bin''')) and os.path.isfile(
            os.path.join(dirpath , '''pytorch_model.bin''')):
            os.remove(os.path.join(dirpath , '''pytorch_model.bin'''))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p , unlogit=False):
    """simple docstring"""
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
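# Quick sanity check of `entropy` (added; illustrative): a uniform distribution
# over four outcomes has entropy ln(4) ≈ 1.3863 nats.
# >>> entropy(torch.tensor([0.25, 0.25, 0.25, 0.25]))
# tensor(1.3863)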
def print_ad_tensor(tensor):
    """simple docstring"""
    logger.info('''lv, h >\t''' + '''\t'''.join(F'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:d}''' for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (Michel et al.,
    http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
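# Reading the result (a sketch, assuming `args`, `model` and `eval_dataloader`
# built as in main() below): head_importance has one row per layer and one
# column per head, and larger values mean the LM loss is more sensitive to
# masking that head.
# >>> _, head_importance, _ = compute_heads_importance(args, model, eval_dataloader)
# >>> head_importance.shape  # (num_hidden_layers, num_attention_heads)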
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) until the LM score drops below the
    threshold, following Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the weights) based on the head mask and
    measure the effect on parameters, score and speed."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset (the dtype was garbled in the source; int64 token ids assumed)
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
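# Example invocation (hypothetical paths; --data_dir should point at a text
# file of whitespace-separated GPT-2 token ids that np.loadtxt can parse):
#   python run_prune_gpt.py --model_name_or_path gpt2 \
#       --data_dir tokens.txt --output_dir pruned_gpt2 --try_masking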
| 11 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 101 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Number of times the digits of ``num`` must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Number of times the digits of ``num`` must be summed before a single
    digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
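    # Worked example: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4 (three steps),
    # while digit sums give 39 -> 12 -> 3 (two steps).
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(39) == 2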
| 11 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
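# For example, downscale_height_and_width(768, 768, 8) returns (96, 96), the
# latent resolution matching a 768x768 image, while an off-grid request such
# as 500x500 is rounded up to the next size the VQ decoder can handle
# (here (64, 64)).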
def prepare_image(pil_image, w=512, h=512):
    # resize, map RGB values to [-1, 1] and return a (1, 3, h, w) float tensor
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline for Kandinsky 2.2."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
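    # With strength=0.3 and num_inference_steps=100, get_timesteps() keeps only
    # the last 30 scheduler steps, so the encoded input image is partially
    # noised rather than replaced by pure noise.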
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image

        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
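    # Design note: prepare_latents() encodes the init image with the VQ model,
    # then scheduler.add_noise() mixes it with Gaussian noise at the
    # strength-dependent timestep; strength close to 1.0 is almost pure noise.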
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 102 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 11 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
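    # For reference: with variance_type="fixed_small" the quantity checked above
    # is beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t), which is ~0 at t=0
    # and approaches beta_t (here beta_end = 0.02) late in the schedule.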
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 103 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim]
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def lowerCAmelCase (__A="train-batch.pt"):
"""simple docstring"""
_a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=__A , repo_type='''dataset''')
_a = torch.load(__A , map_location=__A)
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 11 | 0 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check whether ``dataset_size`` fits under ``config.IN_MEMORY_MAX_SIZE``."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
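# A minimal usage sketch (hypothetical path and metadata): record the actual
# size/sha256 of a downloaded file, then compare it with what the dataset's
# metadata expects; any mismatch raises NonMatchingChecksumError.
# recorded = {"data.csv": get_size_checksum_dict("data.csv")}
# expected = {"data.csv": {"num_bytes": 1024, "checksum": "..."}}
# verify_checksums(expected, recorded, verification_name="dataset source files")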
| 104 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self ) -> Tuple:
        """simple docstring"""
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config(self ) -> List[str]:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self ) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings(self ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_open_llama_sequence_classification_model(self ) -> Any:
        """simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label(self ) -> Dict:
        """simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label(self ) -> Optional[Any]:
        """simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
    def test_save_load_fast_init_from_base(self ) -> Optional[Any]:
        """simple docstring"""
        pass
    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def test_model_rope_scaling(self , scaling_type ) -> Optional[int]:
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'''type''': scaling_type, '''factor''': 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
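# Added sketch (illustrative, not part of the original test file): with linear
# RoPE scaling, the '''factor''' from the rope_scaling dict divides the position
# indices before the rotary angles are computed, so a 10x longer context reuses
# the angle range the model was trained on.
def _linear_rope_angles_demo(seq_len: int, dim: int, factor: float):
    # inverse frequencies of the standard rotary embedding
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
    # linear scaling: stretch positions back into the trained range
    positions = torch.arange(seq_len).float() / factor
    return torch.outer(positions, inv_freq)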
| 11 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('''s3fs''') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri( dataset_path: str ) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://' )[1]
    return dataset_path
def is_remote_filesystem( fs: fsspec.AbstractFileSystem ) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename( fs: fsspec.AbstractFileSystem , src: str , dst: str ) -> str:
    """simple docstring"""
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock( ) -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn , 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
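# Added usage sketch (illustrative, not part of the original module): stripping
# the protocol from a dataset URI leaves the filesystem-relative path untouched.
if __name__ == "__main__":
    assert extract_path_from_uri('s3://my-bucket/datasets/squad' ) == 'my-bucket/datasets/squad'
    assert extract_path_from_uri('/local/path' ) == '/local/path'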
| 105 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> List[str]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self ) -> str:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self ) -> None:
        """simple docstring"""
        self.model_tester = FlaxAlbertModelTester(self )
    @slow
    def test_model_from_pretrained(self ) -> str:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_no_head_absolute_embedding(self ) -> Dict:
        """simple docstring"""
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 11 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig ):
    model_type = 'bloom'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_hidden_layers': 'n_layer',
        'num_attention_heads': 'n_head',
    }
    def __init__( self , vocab_size=250_880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig(OnnxConfigWithPast ):
    torch_onnx_minimum_version = version.parse('1.12' )
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ) -> Any:
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='inputs' , inverted_values_shape=True )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head
    @property
    def atol_for_validation( self ) -> float:
        return 1e-3
    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizer" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        common_inputs = super(BloomOnnxConfig , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        return 13
 | 106 |
'''simple docstring'''
def validate_initial_digits(credit_card_number ):
    """simple docstring"""
    return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6'''))
def luhn_validation(credit_card_number ):
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len , -1 , -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1 , -1 , -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number ):
    """simple docstring"""
    error_message = F'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(F'''{error_message} it has nonnumerical characters.''')
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(F'''{error_message} of its length.''')
        return False
    if not validate_initial_digits(credit_card_number):
        print(F'''{error_message} of its first two digits.''')
        return False
    if not luhn_validation(credit_card_number):
        print(F'''{error_message} it fails the Luhn check.''')
        return False
    print(F'''{credit_card_number} is a valid credit card number.''')
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
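    # Worked example of the Luhn rule (added illustration, not in the original
    # script): for "59", double the 5 -> 10, reduce it to 1 + 0 = 1, then add the
    # untouched 9 for a total of 10; 10 % 10 == 0, so the checksum passes.
    assert luhn_validation("59")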
| 11 | 0 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( input_string: str ):
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l , r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
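    # Added illustration (not in the original file): Manacher's algorithm returns
    # the longest palindromic substring in linear time.
    assert _SCREAMING_SNAKE_CASE('abacab' ) == 'bacab'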
| 107 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_center_crop: bool = None , crop_size: int = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , do_convert_rgb: bool = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="""size""" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
 | 108 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product = "laptop" ):
    """simple docstring"""
    url = F'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url , headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ])
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''}) , ):
        try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''' , attrs={'''class''': '''a-offscreen'''}).text
            try:
                product_rating = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''}).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                            - float(product_price.strip('''₹''').replace(''',''' , ''''''))
                        )
                        / float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                    )
                    * 100)
            except ValueError:
                discount = float('''nan''')
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = ''' '''
        product_mrp = ''' '''
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
lowercase_ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 11 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ViltImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning ,)
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs ,):
        '''simple docstring'''
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs ,)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning ,)
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning ,)
        return self.image_processor
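# Added usage sketch (illustrative, not part of the original module; the
# checkpoint name below is only an example): a single processor call tokenizes
# the text and preprocesses the image together.
#
#     processor = ViltProcessor.from_pretrained("""dandelin/vilt-b32-finetuned-vqa""")
#     inputs = processor(image, """How many cats are there?""", return_tensors="""pt""")
#     # inputs holds input_ids / attention_mask plus pixel_values / pixel_mask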
| 109 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image , w , h ):
    """simple docstring"""
    if isinstance(image , torch.Tensor):
        return image
    elif isinstance(image , PIL.Image.Image):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
        image = np.concatenate(image , axis=0)
        image = np.array(image).astype(np.float32) / 2_55.0
        image = image.transpose(0 , 3 , 1 , 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0] , torch.Tensor):
        image = torch.cat(image , dim=0)
    return image
def slerp(t , v0 , v1 , DOT_THRESHOLD=0.99_95 ):
    """simple docstring"""
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x , y ):
    """simple docstring"""
    x = F.normalize(x , dim=-1)
    y = F.normalize(y , dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model , value ):
    """simple docstring"""
    for param in model.parameters():
        param.requires_grad = value
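# Added sanity sketch (illustrative, not part of the original pipeline): slerp
# returns its endpoints at t=0 and t=1, the property the latent/embedding
# blending in the pipeline below relies on.
if __name__ == "__main__":
    _v_a = np.array([1.0, 0.0])
    _v_b = np.array([0.0, 1.0])
    assert np.allclose(slerp(0.0 , _v_a , _v_b) , _v_a)
    assert np.allclose(slerp(1.0 , _v_a , _v_b) , _v_b)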
class __A ( A ):
'''simple docstring'''
    def __init__(self , vae , text_encoder , clip_model , tokenizer , unet , scheduler , feature_extractor , coca_model=None , coca_tokenizer=None , coca_transform=None , ) -> str:
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , clip_model=clip_model , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , coca_model=coca_model , coca_tokenizer=coca_tokenizer , coca_transform=coca_transform , )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size , int )
            else feature_extractor.size['''shortest_edge''']
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , False )
        set_requires_grad(self.clip_model , False )
    def enable_attention_slicing(self , slice_size = "auto" ) -> Union[str, Any]:
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing(self ) -> Optional[Any]:
        """simple docstring"""
        self.enable_attention_slicing(None )
    def freeze_vae(self ) -> int:
        """simple docstring"""
        set_requires_grad(self.vae , False )
    def unfreeze_vae(self ) -> Union[str, Any]:
        """simple docstring"""
        set_requires_grad(self.vae , True )
    def freeze_unet(self ) -> Dict:
        """simple docstring"""
        set_requires_grad(self.unet , False )
    def unfreeze_unet(self ) -> str:
        """simple docstring"""
        set_requires_grad(self.unet , True )
    def get_timesteps(self , num_inference_steps , strength , device ) -> Optional[Any]:
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self , image , timestep , batch_size , dtype , device , generator=None ) -> List[str]:
        """simple docstring"""
        if not isinstance(image , torch.Tensor ):
            raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(image )}''' )
        image = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ):
            init_latents = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
            ]
            init_latents = torch.cat(init_latents , dim=0 )
        else:
            init_latents = self.vae.encode(image ).latent_dist.sample(generator )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size , dim=0 )
        noise = randn_tensor(init_latents.shape , generator=generator , device=device , dtype=dtype )
        # get latents
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def get_image_description(self , image ) -> Tuple:
        """simple docstring"""
        transformed_image = self.coca_transform(image ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
    def get_clip_image_embeddings(self , image , batch_size ) -> List[Any]:
        """simple docstring"""
        clip_image_input = self.feature_extractor.preprocess(image )
        clip_image_features = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size , dim=0 )
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self , latents , timestep , index , text_embeddings , noise_pred_original , original_image_embeddings_clip , clip_guidance_scale , ) -> Union[str, Any]:
        """simple docstring"""
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents , timestep )
        # predict the noise residual
        noise_pred = self.unet(latent_model_input , timestep , encoder_hidden_states=text_embeddings ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t )
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , LMSDiscreteScheduler ):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = transforms.Resize(self.feature_extractor_size )(image )
        image = self.normalize(image ).to(latents.dtype )
        image_embeddings_clip = self.clip_model.get_image_features(image )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        loss = spherical_dist_loss(image_embeddings_clip , original_image_embeddings_clip ).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss , latents )[0]
        if isinstance(self.scheduler , LMSDiscreteScheduler ):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__(self , A , A , A = None , A = None , A = 512 , A = 512 , A = 0.6 , A = 50 , A = 7.5 , A = 1 , A = 0.0 , A = 100 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> str:
"""simple docstring"""
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
# get prompt text embeddings for content and style
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(A , A , self.device )
_a = timesteps[:1].repeat(A )
# Preprocess image
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = slerp(A , A , A )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(A , A )
_a = self.get_clip_image_embeddings(A , A )
_a = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_a = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18215 * latents
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
UpperCamelCase__ = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 110 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self ) -> Optional[int]:
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer(self , **kwargs ) -> int:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ) -> Tuple:
        """simple docstring"""
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text
    def test_full_tokenizer(self ) -> List[Any]:
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
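    def test_added_bpe_detokenization_example(self ) -> None:
        """Added illustration (not in the original test): the '@@' suffix marks a
        non-final subword piece, so joining tokens back together only requires
        stripping the '@@ ' continuation markers."""
        pieces = '''adapt re@@ a@@ c@@ t'''
        self.assertEqual(pieces.replace('''@@ ''' , '''''' ) , '''adapt react''' )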
| 11 | 0 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple , flax_tensor ) -> List[Any]:
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
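# Added illustration (not in the original script): an expert kernel of shape
# (num_experts, d_model, d_ff) is renamed to a """weight""" key and permuted into
# torch's per-expert (out_features, in_features) Linear layout, e.g.
#
#     rename_base_flax_keys(("mlp", "wi", "kernel"), torch.zeros(8, 512, 1024))
#     # -> (("mlp", "wi", "weight"), tensor of shape (8, 1024, 512))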
def get_key_and_tensorstore_dict(layer , checkpoint_info , switch_checkpoint_path ) -> Tuple:
    if "metadata" in layer:
        split_layer = layer.split("""metadata""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        split_layer = layer.split("""/""" )
        curr_real_layer_name = """/""".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block , save_path ) -> int:
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("""/""" , """.""" )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = WEIGHTS_NAME ) -> List[str]:
UpperCAmelCase__ : Union[str, Any] = convert_file_size_to_int(__A )
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : List[Any] = 0
os.makedirs(__A , exist_ok=__A )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
UpperCAmelCase__ : str = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
UpperCAmelCase__ : Optional[Any] = flatten_dict(__A , sep="""/""" )
UpperCAmelCase__ : Optional[int] = {}
for layer in checkpoint_info.keys():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = get_key_and_tensorstore_dict(
__A , __A , __A )
if curr_real_layer_name in all_layers:
UpperCAmelCase__ : Optional[Any] = content
else:
UpperCAmelCase__ : Dict = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
UpperCAmelCase__ : List[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
UpperCAmelCase__ : Optional[Any] = torch.tensor(__A )
UpperCAmelCase__ : int = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __A )
UpperCAmelCase__ : List[Any] = """/""".join(__A )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
UpperCAmelCase__ : List[str] = os.path.join(
__A , weights_name.replace(""".bin""" , F"""-{len(__A )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__A , __A )
sharded_state_dicts.append(current_block.keys() )
del current_block
UpperCAmelCase__ : Any = {}
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Dict = raw_weights.to(getattr(__A , __A ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
UpperCAmelCase__ : str = os.path.join(__A , weights_name.replace(""".bin""" , F"""-{len(__A )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__A , __A )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__A ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
UpperCAmelCase__ : Tuple = {}
UpperCAmelCase__ : int = {}
for idx, shard in enumerate(__A ):
UpperCAmelCase__ : int = weights_name.replace(
""".bin""" , F"""-{idx+1:05d}-of-{len(__A ):05d}.bin""" ) # len(sharded_state_dicts):05d}
UpperCAmelCase__ : List[str] = os.path.join(__A , weights_name.replace(""".bin""" , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__A , os.path.join(__A , __A ) )
UpperCAmelCase__ : List[str] = shard
for key in shard:
UpperCAmelCase__ : int = shard_file
# Add the metadata
UpperCAmelCase__ : Optional[int] = {"""total_size""": total_size}
UpperCAmelCase__ : Tuple = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__A , __A ) , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase__ : Union[str, Any] = json.dumps(__A , indent=2 , sort_keys=__A ) + """\n"""
f.write(__A )
return metadata, index
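# --- Added illustration (not part of the original script) ---
# The loop above starts a new shard file whenever the running byte count would
# exceed `max_shard_size`. A minimal standalone sketch of the same accounting,
# using made-up weight sizes:
def _plan_shards(weight_sizes, max_shard_size):
    """Group (name, byte_size) pairs into shards of at most max_shard_size bytes."""
    shards, current, current_size = [], [], 0
    for name, size in weight_sizes:
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)
    return shards
# _plan_shards([("a", 6), ("b", 5), ("c", 4)], 10) == [["a"], ["b", "c"]]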
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_A = parser.parse_args()
shard_on_the_fly(
    args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check() -> Optional[Any]:
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
UpperCAmelCase__ : Union[str, Any] = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
UpperCAmelCase__ : Union[str, Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    UpperCAmelCase__ : int = T5Tokenizer.from_pretrained("""t5-small""" )
    UpperCAmelCase__ : Optional[int] = """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
UpperCAmelCase__ : Dict = tokenizer(__A , return_tensors="""pt""" ).input_ids
UpperCAmelCase__ : Optional[int] = model.generate(__A , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 182 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase_ = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """Rename T5X/Flax Switch Transformers keys to the HF naming scheme."""
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_to_block_of_layer , key):
            new_key = re.sub(r'''layers_(\d+)''' , r'''block/\1/layer''' , new_key)
        layer_to_block_of_layer = r'''(encoder|decoder)\/'''
        if re.match(layer_to_block_of_layer , key):
            groups = re.match(layer_to_block_of_layer , key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r'''/mlp/''' , r'''/1/mlp/''' , new_key)
                new_key = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/1/layer_norm/''' , new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r'''/mlp/''' , r'''/2/mlp/''' , new_key)
                new_key = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/2/layer_norm/''' , new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key)
        print(F'''{key} -> {new_key}''')
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_key = key.replace('''expert/''' , F'''experts/expert_{idx}/''')
                s_dict[new_key] = expert_weights[idx]
                print(F'''{key} -> {new_key}''')
            s_dict.pop(key)
    return s_dict
lowercase_ = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file , num_experts):
    """Build a SwitchTransformersConfig from a T5X gin config file."""
    import regex as re
    with open(gin_file , '''r''') as f:
        raw_gin = f.read()
    regex_match = re.findall(r'''(.*) = ([0-9.]*)''' , raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '''.''' in value else int(value)
    activation = re.findall(r'''(.*activations) = \(\'(.*)\',\)''' , raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8):
    """Convert a T5X Switch Transformers checkpoint to a PyTorch model."""
    print(F'''Loading flax weights from : {flax_checkpoint_path}''')
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params , sep='''/''')
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params , sep='''/''')
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params)
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
    args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 11 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_snake_case : Optional[int] = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_snake_case : Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 'maskformer'
a_ = {'hidden_size': 'mask_feature_size'}
a_ = ['resnet', 'swin']
a_ = ['detr']
    def __init__( self , fpn_feature_size = 2_5_6 , mask_feature_size = 2_5_6 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.02 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 20.0 , output_auxiliary_logits = None , **kwargs , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__lowerCAmelCase = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowerCAmelCase = backbone_config.pop('model_type' )
__lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
__lowerCAmelCase = config_class.from_dict(lowerCAmelCase_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__lowerCAmelCase = DetrConfig()
else:
# verify that the decoder is supported
__lowerCAmelCase = (
decoder_config.pop('model_type' ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowerCAmelCase = CONFIG_MAPPING[decoder_type]
__lowerCAmelCase = config_class.from_dict(lowerCAmelCase_ )
__lowerCAmelCase = backbone_config
__lowerCAmelCase = decoder_config
# main feature dimension for the model
__lowerCAmelCase = fpn_feature_size
__lowerCAmelCase = mask_feature_size
# initializer
__lowerCAmelCase = init_std
__lowerCAmelCase = init_xavier_std
# Hungarian matcher && loss
__lowerCAmelCase = cross_entropy_weight
__lowerCAmelCase = dice_weight
__lowerCAmelCase = mask_weight
__lowerCAmelCase = use_auxiliary_loss
__lowerCAmelCase = no_object_weight
__lowerCAmelCase = output_auxiliary_logits
__lowerCAmelCase = self.decoder_config.encoder_attention_heads
__lowerCAmelCase = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
@classmethod
def lowercase ( cls : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
return cls(
backbone_config=lowerCAmelCase_ , decoder_config=lowerCAmelCase_ , **lowerCAmelCase_ , )
def lowercase ( self : List[Any] ) -> Dict[str, any]:
__lowerCAmelCase = copy.deepcopy(self.__dict__ )
__lowerCAmelCase = self.backbone_config.to_dict()
__lowerCAmelCase = self.decoder_config.to_dict()
__lowerCAmelCase = self.__class__.model_type
return output
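# --- Added usage sketch (not part of the original file) ---
# Assuming this class is exported as `transformers.MaskFormerConfig`, a default
# instance falls back to a Swin backbone and a DETR decoder, and the config can
# be round-tripped through a plain dict:
#
#     config = MaskFormerConfig()
#     assert config.backbone_config.model_type == "swin"
#     assert config.decoder_config.model_type == "detr"
#     restored = MaskFormerConfig.from_dict(config.to_dict())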
| 53 |
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of a number.

    If digit_amount > 0, round the fraction to that many places;
    otherwise return the entire fractional part."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
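# Note (added): for digit_amount <= 0 the entire fractional part is returned,
# so decimal_isolate(1.53, 0) gives 0.53 (up to float rounding), while
# decimal_isolate(35.345, 2) rounds the fraction to 0.34.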
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 11 | 0 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
UpperCAmelCase_ : Tuple = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    """Drop keys that should not be ported to the HF checkpoint."""
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k, None)
UpperCAmelCase_ : Tuple = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    """Rename original Whisper state-dict keys to the HF naming scheme."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(F"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a Linear layer that shares its weights with an Embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    """Download `url` into `root`, verifying the SHA256 checksum embedded in the URL.
    NOTE: the default for `root` is an assumption added so the one-argument call
    in the conversion function below works."""
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('/')[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(F"""{download_target} exists and is not a regular file""")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, 'rb').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")
    with urllib.request.urlopen(url) as source, open(download_target, 'wb') as output:
        with tqdm(
            total=int(source.info().get('Content-Length')), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, 'rb').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.')
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ):
"""simple docstring"""
if ".pt" not in checkpoint_path:
a_ : int = _download(_MODELS[checkpoint_path] )
else:
a_ : Optional[Any] = torch.load(__A , map_location='cpu' )
a_ : Optional[int] = original_checkpoint['dims']
a_ : str = original_checkpoint['model_state_dict']
a_ : Union[str, Any] = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(__A )
rename_keys(__A )
a_ : int = True
a_ : Tuple = state_dict['decoder.layers.0.fc1.weight'].shape[0]
a_ : List[str] = WhisperConfig(
        vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=__A , decoder_ffn_dim=__A , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_head'] , max_source_positions=dimensions['n_audio_ctx'] , )
a_ : Dict = WhisperForConditionalGeneration(__A )
a_ , a_ : Union[str, Any] = model.model.load_state_dict(__A , strict=__A )
if len(__A ) > 0 and not set(__A ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
a_ : str = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a_ : Optional[int] = proj_out_weights
model.save_pretrained(__A )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
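# Example (hypothetical script name and output path): a checkpoint name without
# a ".pt" suffix is treated as an official model identifier and fetched via
# `_download`:
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny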
| 570 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase_ = 10
lowercase_ = 256
def get_min_hash(tokens):
    """Return the MinHash of a token list, or None if the file is too short."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code):
    """Tokenize a code string on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
'''simple docstring'''
    def __init__(self , * , duplication_jaccard_threshold = 0.85 , ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add(self , code_key , min_hash ) -> None:
        """Insert a key and its MinHash, grouping it with any close duplicates."""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''' )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters(self ) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save(self , filepath ) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , '''w''' ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element):
    """simple docstring"""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10_000) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator , jaccard_threshold):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)) , max_queue_size=100)):
        di.add(filename , min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(codea , codeb):
    """Jaccard similarity between the token sets of two code strings."""
    tokensa = get_tokens(codea)
    tokensb = get_tokens(codeb)
    return len(tokensa & tokensb) / len(tokensa | tokensb)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster , jaccard_threshold):
    """simple docstring"""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['''base_index''']]['''content''']
        for element2 in extremes:
            code2 = _shared_dataset[element2['''base_index''']]['''content''']
            if jaccard_similarity(code1 , code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1['''copies'''] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list , dataset , jaccard_threshold):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list) , ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset , jaccard_threshold = 0.85):
    """Remove near-duplicate files from `dataset`, keeping one extreme per cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold)
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x , idx: idx not in remove_indices , with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['''is_extreme'''] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element['''copies'''] = extreme_dict[element['''base_index''']]['''copies''']
    print(F'''Original dataset size: {len(dataset)}''')
    print(F'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(F'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(F'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(F'''Filtered dataset size: {len(ds_filter)}''')
    return ds_filter, duplicate_clusters
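# --- Added usage sketch (not part of the original module) ---
# Deduplicating a toy `datasets.Dataset`; the column names ("content",
# "repo_name", "path") are the ones read above, and each file needs at least
# MIN_NUM_TOKENS tokens for a MinHash to be computed. `code_a`/`code_b` are
# placeholders for real file contents.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({
#         "content": [code_a, code_a, code_b],   # two exact duplicates
#         "repo_name": ["r1", "r2", "r3"],
#         "path": ["a.py", "b.py", "c.py"],
#     })
#     ds_filter, duplicate_clusters = deduplicate_dataset(ds, 0.85)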
| 11 | 0 |
def solution(n: int = 4_000_000) -> int:
    '''Sum of the even-valued Fibonacci terms not exceeding n (Project Euler #2).'''
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f'''{solution() = }''')
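    # Expected output (Project Euler problem 2): solution() = 4613732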
| 500 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
'''simple docstring'''
def __init__(self ) -> Dict:
"""simple docstring"""
super().__init__()
_a = nn.Linear(3 , 4 )
        _a = nn.BatchNorm1d(4 )
_a = nn.Linear(4 , 5 )
    def forward(self , x ) -> Dict:
        """simple docstring"""
        return self.lineara(self.batchnorm(self.lineara(x ) ) )
class PreForwardHook( ModelHook ):
'''simple docstring'''
    def pre_forward(self , module , *args , **kwargs ) -> Optional[Any]:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
'''simple docstring'''
    def post_forward(self , module , output ) -> int:
"""simple docstring"""
return output + 1
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
self.assertEqual(test_model._hf_hook , A )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
add_hook_to_module(A , A , append=A )
self.assertEqual(isinstance(test_model._hf_hook , A ) , A )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(x + 1 )
_a = test_model(x + 2 )
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , A , atol=1E-5 )
def a__ (self ) -> str:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , output + 2 , atol=1E-5 )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a = True
_a = test_model(A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(A , AlignDevicesHook(io_same_device=A ) )
_a = torch.randn(2 , 3 ).to(0 )
_a = model(A )
self.assertEqual(output.device , torch.device(0 ) )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
_a = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(A , execution_device=A , offload=A )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(A , execution_device=A , offload=A , offload_buffers=A )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() , offload_buffers=A , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 11 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    '''Vertex of a weighted undirected graph.'''

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex_id: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        '''Add a pointer to a connected vertex.'''
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        '''Record the weight of the edge to `vertex`.'''
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    '''Connect vertices a and b (1-indexed) with an edge of weight `edge`.'''
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph, root):
    '''Prim's MST algorithm with a linear scan over the queue; O(V^2).'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph, root):
    '''Prim's MST algorithm with a binary heap; yields the MST edges.'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
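# --- Added usage sketch (not part of the original module) ---
# Build a small 4-vertex graph with hypothetical edge weights and run both
# variants; `prim` returns the MST edge list, `prim_heap` yields it lazily.
#
#     g = [Vertex(i) for i in range(1, 5)]
#     connect(g, 1, 2, 15)
#     connect(g, 1, 3, 12)
#     connect(g, 2, 4, 13)
#     connect(g, 3, 4, 5)
#     mst = prim(g, g[0])
#     mst_heap = list(prim_heap(g, g[0]))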
| 36 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = IFInpaintingSuperResolutionPipeline
__lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self ) -> List[Any]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self , device , seed=0 ) -> List[Any]:
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def a__ (self ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def a__ (self ) -> str:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a__ (self ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
self._test_save_load_local()
def a__ (self ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 11 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule( FlaxBigBirdForQuestionAnsweringModule ):
"""simple docstring"""
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup(self ) -> Any:
        super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype )
    def __call__(self , *args , **kwargs ) -> Any:
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions( FlaxBigBirdForQuestionAnswering ):
    """simple docstring"""
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooler_labels ) -> List[Any]:
'''simple docstring'''
def cross_entropy(lowerCAmelCase_ : int ,lowerCAmelCase_ : int ,lowerCAmelCase_ : Optional[int]=None ):
UpperCAmelCase_= logits.shape[-1]
UpperCAmelCase_= (labels[..., None] == jnp.arange(__A )[None]).astype("""f4""" )
UpperCAmelCase_= jax.nn.log_softmax(__A ,axis=-1 )
UpperCAmelCase_= -jnp.sum(labels * logits ,axis=-1 )
if reduction is not None:
UpperCAmelCase_= reduction(__A )
return loss
UpperCAmelCase_= partial(__A ,reduction=jnp.mean )
UpperCAmelCase_= cross_entropy(__A ,__A )
UpperCAmelCase_= cross_entropy(__A ,__A )
UpperCAmelCase_= cross_entropy(__A ,__A )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
"""simple docstring"""
a__ : str = "google/bigbird-roberta-base"
a__ : int = 3000
a__ : int = 1_0500
a__ : int = 128
a__ : int = 3
a__ : int = 1
a__ : int = 5
# tx_args
a__ : float = 3e-5
a__ : float = 0.0
a__ : int = 2_0000
a__ : float = 0.0095
a__ : str = "bigbird-roberta-natural-questions"
a__ : str = "training-expt"
a__ : str = "data/nq-training.jsonl"
a__ : str = "data/nq-validation.jsonl"
    def __post_init__(self ):
os.makedirs(self.base_dir , exist_ok=__UpperCAmelCase )
UpperCAmelCase_= os.path.join(self.base_dir , self.save_dir )
UpperCAmelCase_= self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
"""simple docstring"""
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs
def __call__( self : Optional[int] , __UpperCAmelCase : str ) -> Tuple:
UpperCAmelCase_= self.collate_fn(__UpperCAmelCase )
UpperCAmelCase_= jax.tree_util.tree_map(__UpperCAmelCase , __UpperCAmelCase )
return batch
    def collate_fn(self , features ) -> List[str]:
UpperCAmelCase_, UpperCAmelCase_= self.fetch_inputs(features["""input_ids"""] )
UpperCAmelCase_= {
"""input_ids""": jnp.array(__UpperCAmelCase , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(__UpperCAmelCase , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
    def fetch_inputs(self , input_ids ) -> Tuple:
UpperCAmelCase_= [self._fetch_inputs(__UpperCAmelCase ) for ids in input_ids]
return zip(*__UpperCAmelCase )
    def _fetch_inputs(self , input_ids ) -> Optional[Any]:
UpperCAmelCase_= [1 for _ in range(len(__UpperCAmelCase ) )]
while len(__UpperCAmelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def get_batched_dataset(dataset , batch_size , seed=None ) -> Dict:
'''simple docstring'''
if seed is not None:
UpperCAmelCase_= dataset.shuffle(seed=__A )
for i in range(len(__A ) // batch_size ):
UpperCAmelCase_= dataset[i * batch_size : (i + 1) * batch_size]
yield dict(__A )
@partial(jax.pmap ,axis_name="""batch""" )
def train_step(state , drp_rng , **model_inputs ) -> str:
'''simple docstring'''
def loss_fn(lowerCAmelCase_ : List[Any] ):
UpperCAmelCase_= model_inputs.pop("""start_labels""" )
UpperCAmelCase_= model_inputs.pop("""end_labels""" )
UpperCAmelCase_= model_inputs.pop("""pooled_labels""" )
UpperCAmelCase_= state.apply_fn(**__A ,params=__A ,dropout_rng=__A ,train=__A )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= outputs
return state.loss_fn(
__A ,__A ,__A ,__A ,__A ,__A ,)
UpperCAmelCase_, UpperCAmelCase_= jax.random.split(__A )
UpperCAmelCase_= jax.value_and_grad(__A )
UpperCAmelCase_, UpperCAmelCase_= grad_fn(state.params )
UpperCAmelCase_= jax.lax.pmean({"""loss""": loss} ,axis_name="""batch""" )
UpperCAmelCase_= jax.lax.pmean(__A ,"""batch""" )
UpperCAmelCase_= state.apply_gradients(grads=__A )
return state, metrics, new_drp_rng
@partial(jax.pmap ,axis_name="""batch""" )
def val_step(state , **model_inputs ) -> Any:
'''simple docstring'''
UpperCAmelCase_= model_inputs.pop("""start_labels""" )
UpperCAmelCase_= model_inputs.pop("""end_labels""" )
UpperCAmelCase_= model_inputs.pop("""pooled_labels""" )
UpperCAmelCase_= state.apply_fn(**__A ,params=state.params ,train=__A )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= outputs
UpperCAmelCase_= state.loss_fn(__A ,__A ,__A ,__A ,__A ,__A )
UpperCAmelCase_= jax.lax.pmean({"""loss""": loss} ,axis_name="""batch""" )
return metrics
class TrainState( train_state.TrainState ):
"""simple docstring"""
    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class Trainer:
"""simple docstring"""
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self , model , tx , num_train_steps , ckpt_dir=None ) -> Tuple:
UpperCAmelCase_= model.params
UpperCAmelCase_= TrainState.create(
apply_fn=model.__call__ , params=__UpperCAmelCase , tx=__UpperCAmelCase , loss_fn=__UpperCAmelCase , )
if ckpt_dir is not None:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= restore_checkpoint(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
UpperCAmelCase_, UpperCAmelCase_= build_tx(**__UpperCAmelCase )
UpperCAmelCase_= train_state.TrainState(
step=__UpperCAmelCase , apply_fn=model.__call__ , params=__UpperCAmelCase , tx=__UpperCAmelCase , opt_state=__UpperCAmelCase , )
UpperCAmelCase_= args
UpperCAmelCase_= data_collator
UpperCAmelCase_= lr
UpperCAmelCase_= params
UpperCAmelCase_= jax_utils.replicate(__UpperCAmelCase )
return state
    def train(self , state , tr_dataset , val_dataset ) -> Dict:
UpperCAmelCase_= self.args
UpperCAmelCase_= len(__UpperCAmelCase ) // args.batch_size
UpperCAmelCase_= jax.random.PRNGKey(0 )
UpperCAmelCase_= jax.random.split(__UpperCAmelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
UpperCAmelCase_= jnp.array(0 , dtype=jnp.floataa )
UpperCAmelCase_= get_batched_dataset(__UpperCAmelCase , args.batch_size , seed=__UpperCAmelCase )
UpperCAmelCase_= 0
for batch in tqdm(__UpperCAmelCase , total=__UpperCAmelCase , desc=F"""Running EPOCH-{epoch}""" ):
UpperCAmelCase_= self.data_collator(__UpperCAmelCase )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.train_step_fn(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
UpperCAmelCase_= jax_utils.unreplicate(state.step )
UpperCAmelCase_= running_loss.item() / i
UpperCAmelCase_= self.scheduler_fn(state_step - 1 )
UpperCAmelCase_= self.evaluate(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase_= {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(__UpperCAmelCase ) )
self.logger.log(__UpperCAmelCase , commit=__UpperCAmelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=__UpperCAmelCase )
    def evaluate(self , state , dataset ) -> List[Any]:
UpperCAmelCase_= get_batched_dataset(__UpperCAmelCase , self.args.batch_size )
UpperCAmelCase_= len(__UpperCAmelCase ) // self.args.batch_size
UpperCAmelCase_= jnp.array(0 , dtype=jnp.floataa )
UpperCAmelCase_= 0
for batch in tqdm(__UpperCAmelCase , total=__UpperCAmelCase , desc="""Evaluating ... """ ):
UpperCAmelCase_= self.data_collator(__UpperCAmelCase )
UpperCAmelCase_= self.val_step_fn(__UpperCAmelCase , **__UpperCAmelCase )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
    def save_checkpoint(self , save_dir , state ) -> Dict:
UpperCAmelCase_= jax_utils.unreplicate(__UpperCAmelCase )
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=""" ... """ )
self.model_save_fn(__UpperCAmelCase , params=state.params )
with open(os.path.join(__UpperCAmelCase , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__UpperCAmelCase , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(__UpperCAmelCase , """data_collator.joblib""" ) )
with open(os.path.join(__UpperCAmelCase , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , __UpperCAmelCase )
print("""DONE""" )
def restore_checkpoint(save_dir , state ) -> Optional[Any]:
'''simple docstring'''
print(F"""RESTORING CHECKPOINT FROM {save_dir}""" ,end=""" ... """ )
with open(os.path.join(__A ,"""flax_model.msgpack""" ) ,"""rb""" ) as f:
UpperCAmelCase_= from_bytes(state.params ,f.read() )
with open(os.path.join(__A ,"""opt_state.msgpack""" ) ,"""rb""" ) as f:
UpperCAmelCase_= from_bytes(state.opt_state ,f.read() )
UpperCAmelCase_= joblib.load(os.path.join(__A ,"""args.joblib""" ) )
UpperCAmelCase_= joblib.load(os.path.join(__A ,"""data_collator.joblib""" ) )
with open(os.path.join(__A ,"""training_state.json""" ) ,"""r""" ) as f:
UpperCAmelCase_= json.load(__A )
UpperCAmelCase_= training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def scheduler_fn(lr , init_lr , warmup_steps , num_train_steps ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_= num_train_steps - warmup_steps
UpperCAmelCase_= optax.linear_schedule(init_value=__A ,end_value=__A ,transition_steps=__A )
UpperCAmelCase_= optax.linear_schedule(init_value=__A ,end_value=1E-7 ,transition_steps=__A )
UpperCAmelCase_= optax.join_schedules(schedules=[warmup_fn, decay_fn] ,boundaries=[warmup_steps] )
return lr
def build_tx(lr , init_lr , warmup_steps , num_train_steps , weight_decay ) -> List[Any]:
'''simple docstring'''
def weight_decay_mask(lowerCAmelCase_ : Optional[int] ):
UpperCAmelCase_= traverse_util.flatten_dict(__A )
UpperCAmelCase_= {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(__A )
UpperCAmelCase_= scheduler_fn(__A ,__A ,__A ,__A )
UpperCAmelCase_= optax.adamw(learning_rate=__A ,weight_decay=__A ,mask=__A )
return tx, lr
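# --- Added sketch (not part of the original script) ---
# The schedule built by build_tx warms up linearly from init_lr to lr over
# `warmup_steps`, then decays linearly toward the 1e-7 floor. The positional
# argument order (lr, init_lr, warmup_steps, num_train_steps, weight_decay)
# is the one assumed in the signatures above.
#
#     tx, lr_fn = build_tx(3e-5, 0.0, 20_000, 100_000, 0.0095)
#     lr_fn(0)        # ~0.0
#     lr_fn(20_000)   # ~3e-5 (peak, end of warmup)
#     lr_fn(100_000)  # ~1e-7 (end of the decay schedule)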
| 593 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ) -> Tuple:
"""simple docstring"""
_a = parent
_a = batch_size
_a = seq_length
_a = act_dim
_a = state_dim
_a = hidden_size
_a = max_length
_a = is_training
    def prepare_config_and_inputs(self ) -> Optional[int]:
"""simple docstring"""
_a = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
_a = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
_a = floats_tensor((self.batch_size, self.seq_length, 1) )
_a = floats_tensor((self.batch_size, self.seq_length, 1) )
_a = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 )
_a = random_attention_mask((self.batch_size, self.seq_length) )
_a = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self ) -> str:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model(self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ) -> List[Any]:
"""simple docstring"""
_a = DecisionTransformerModel(config=A )
model.to(A )
model.eval()
_a = model(A , A , A , A , A , A )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self ) -> Dict:
"""simple docstring"""
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
__lowerCamelCase : List[str] = ()
__lowerCamelCase : Tuple = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
__lowerCamelCase : str = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : str = False
__lowerCamelCase : Dict = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : Tuple = False
__lowerCamelCase : Dict = False
__lowerCamelCase : List[str] = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """Autoregressively predict states, actions and returns over two timesteps."""
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
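            # Note (added): in a real rollout `state` would come from `env.step(action)`,
            # and `returns_to_go` is decremented by the observed reward - the standard
            # Decision Transformer inference protocol.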
| 11 | 0 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a `DetrConfig` from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
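
# Hedged usage sketch (added; not part of the original module). It shows how the
# configuration above is typically constructed; the behavior shown follows the
# classes defined in this file.
#
#     config = DetrConfig(num_queries=50, encoder_layers=4)
#     assert config.hidden_size == config.d_model        # alias via `attribute_map`
#     onnx_config = DetrOnnxConfig(config)               # class defined below
#     list(onnx_config.inputs)                           # ["pixel_values", "pixel_mask"]
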
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
return 12 | 156 |
'''simple docstring'''
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """Return True if every element in the collection occurs exactly once."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
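    # Hedged self-checks (added): `all_unique` compares set size to list size.
    assert all_unique([1, 2, 3])
    assert not all_unique(["a", "b", "a"])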
| 11 | 0 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 2048-bit
1_4: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 3072-bit
1_5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 4096-bit
1_6: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 6144-bit
1_7: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
# 8192-bit
1_8: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=1_6,
),
"generator": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over a fixed MODP group (RFC 3526)."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
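    # Hedged usage sketch (added): a complete exchange between two parties;
    # both sides must derive the same shared secret.
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared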
| 261 |
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Recursively search a sorted list; return True if `item` is present.

    >>> binary_search([1, 3, 5, 7], 5)
    True
    >>> binary_search([1, 3, 5, 7], 4)
    False
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
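
# Hedged sketch (added): an iterative variant of the search above that avoids
# list slicing, running in O(log n) time and O(1) extra space.
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if a_list[midpoint] < item:
            low = midpoint + 1
        else:
            high = midpoint - 1
    return False
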
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
print(F"""{target} was {not_str}found in {sequence}""")
| 11 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False,
                 bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]",
                 pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # The mask token behaves like a normal word (it includes the preceding space).
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string with SentencePiece, splitting digits off trailing commas."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
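
# Hedged usage sketch (added; not part of the original module). Round-tripping
# text through a pretrained vocabulary; requires downloading `albert-base-v2`.
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     ids = tokenizer("A quick test.").input_ids   # starts with [CLS], ends with [SEP]
#     tokenizer.decode(ids, skip_special_tokens=True)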
| 582 |
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to `target_sum`."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
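    # Hedged usage sketch (added): O(1) range sums after O(n) preprocessing,
    # and subarray-sum lookup via the running-sum set trick above.
    prefix = PrefixSum([1, 2, 3, 4])
    assert prefix.get_sum(1, 3) == 2 + 3 + 4
    assert prefix.contains_sum(6)  # 1 + 2 + 3
    assert not prefix.contains_sum(8)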
| 11 | 0 |
import math
def is_prime(number: int) -> bool:
    """Deterministic trial division using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio`."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
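    # Hedged spot checks (added) for the trial-division helper above.
    assert is_prime(13)
    assert not is_prime(27)
    assert not is_prime(1)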
| 514 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of `n` in ascending order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
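    # Hedged self-checks (added): trial division collects repeated prime factors.
    assert prime_factors(2 * 2 * 3 * 7) == [2, 2, 3, 7]
    assert prime_factors(97) == [97]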
| 11 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
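
# Hedged usage sketch (added; not part of the test file). The same processor
# API against a public BLIP-2 checkpoint:
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     processor.batch_decode(generated_ids, skip_special_tokens=True)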
| 393 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over a fixed MODP group (RFC 3526)."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
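    # Hedged sketch (added): the static helper derives the same shared secret
    # as the instance method, given the peer's private/public key strings.
    alice = DiffieHellman()
    bob = DiffieHellman()
    shared_a = alice.generate_shared_key(bob.generate_public_key())
    shared_b = DiffieHellman.generate_shared_key_static(
        bob.get_private_key(), alice.generate_public_key()
    )
    assert shared_a == shared_b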
| 11 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
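
# Hedged usage sketch (added; not part of the test file). The denoising loop the
# full-loop tests above exercise, with random tensors standing in for a model:
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         noise_pred = torch.randn_like(sample)   # model(sample, t) in practice
#         sample = scheduler.step(noise_pred, t, sample).prev_sample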
| 182 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor as a tab-separated table, one row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and head importance scores
    (http://arxiv.org/abs/1905.10650) over an evaluation dataloader."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) in order of increasing importance until the
    score drops below `masking_threshold` times the original score
    (Michel et al., http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove their weights) according to `head_mask` and
    compare score and timing before/after (Michel et al., http://arxiv.org/abs/1905.10650)."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
_a = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
        args.device = torch.device('''cuda''' , args.local_rank)
        args.n_gpu = 1
torch.distributed.init_process_group(backend='''nccl''') # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device)
if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True)
    torch.save(args , os.path.join(args.output_dir , '''run_args.bin'''))
    logger.info('''Training/evaluation parameters %s''' , args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size)
# Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader)
        prune_heads(args , model , eval_dataloader , head_mask)
if __name__ == "__main__":
main()
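For reference, a minimal sketch of how the artifacts written by the script above can be reloaded afterwards. The output path is hypothetical, and this assumes `save_model` (defined earlier in the script, not shown here) persisted the pruned model via `save_pretrained`:

import os
import torch
from transformers import GPT2LMHeadModel

output_dir = "./pruned_gpt2"  # hypothetical --output_dir used for the run
run_args = torch.load(os.path.join(output_dir, "run_args.bin"))  # written via torch.save above
model = GPT2LMHeadModel.from_pretrained(output_dir)  # assumes save_model() called save_pretrained
print(run_args)
print("params after pruning:", sum(p.numel() for p in model.parameters()))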
| 11 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    """simple docstring"""

    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    """simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    """simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest( TFVisionTextDualEncoderMixin , unittest.TestCase ):
    """simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class VisionTextDualEncoderIntegrationTest( unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2_28_47_27, 0.3_10_41_22]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
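A short sketch of the pattern these tests exercise: pairing an arbitrary TF vision backbone with an arbitrary TF text backbone and projecting both into a shared embedding space. The checkpoint names are the tiny test models used above; inputs are dummies, so shapes are illustrative:

import numpy as np
from transformers import TFVisionTextDualEncoderModel

model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
    "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
)
cfg = model.vision_model.config
pixel_values = np.zeros((1, cfg.num_channels, cfg.image_size, cfg.image_size), dtype=np.float32)
input_ids = np.ones((1, 4), dtype=np.int32)
outputs = model(input_ids=input_ids, pixel_values=pixel_values)
# Both embeddings live in the shared projection space:
print(outputs.text_embeds.shape, outputs.image_embeds.shape)  # (1, projection_dim) each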
| 53 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """
    Return the multiplicative persistence of ``num``: how many times the digits
    must be multiplied together before a single digit remains.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError('''multiplicative_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the additive persistence of ``num``: how many times the digits
    must be summed before a single digit remains.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError('''additive_persistence() only accepts integral values''')
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
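A quick manual check of both functions, mirroring the doctests (digit chains written out by hand):

# 39 -> 3*9=27 -> 2*7=14 -> 1*4=4, so three multiplicative steps
assert multiplicative_persistence(39) == 3
# 39 -> 3+9=12 -> 1+2=3, so two additive steps
assert additive_persistence(39) == 2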
| 11 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = 'umt5'
    keys_to_ignore_at_inference = ['past_key_values']
    def __init__( self , vocab_size=250112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ) -> Any:
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
    @property
    def hidden_size( self ) -> Any:
        return self.d_model

    @property
    def num_attention_heads( self ) -> Tuple:
        return self.num_heads

    @property
    def num_hidden_layers( self ) -> List[Any]:
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset( self ) -> int:
        return 13

    @property
    def atol_for_validation( self ) -> float:
        return 5E-4
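The only non-trivial logic in the config above is the `feed_forward_proj` parsing; a standalone sketch of that rule (the function name is mine, not part of the file):

def parse_feed_forward_proj(feed_forward_proj: str):
    # "gated-gelu" -> ("gelu_new", True); "relu" -> ("relu", False); anything else is rejected.
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if (len(act_info) > 1 and act_info[0] != "gated") or len(act_info) > 2:
        raise ValueError(f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation.")
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # special-cased exactly as in the config
    return dense_act_fn, is_gated_act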
| 570 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> str:
        """simple docstring"""
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self ) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp(self ) -> Optional[Any]:
        """simple docstring"""
        self.image_processor_tester = DPTImageProcessingTester(self )
@property
    def image_processor_dict(self ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ) -> Dict:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs(self ) -> Any:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def test_call_pil(self ) -> Optional[Any]:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def test_call_numpy(self ) -> str:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )

    def test_call_pytorch(self ) -> Optional[int]:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
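A minimal sketch of the preprocessing path these tests cover, using the tester's 18x18 default size (illustrative only; any RGB input works):

import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18},
                              image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
image = Image.fromarray(np.random.randint(0, 256, (30, 30, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])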
| 11 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs( self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
    def get_config( self):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=__a, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=__a, loss_ignore_index=255, num_labels=self.num_labels, )
    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels):
        '''simple docstring'''
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common( self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self):
        '''simple docstring'''
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
def snake_case__ ( self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self):
'''simple docstring'''
return
    def test_forward_signature( self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation( self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="UperNet does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model")
def snake_case__ ( self):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def snake_case__ ( self):
'''simple docstring'''
pass
    def test_hidden_states_output( self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, config))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization( self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason="UperNet does not have tied weights")
def snake_case__ ( self):
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained( self):
        '''simple docstring'''
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
    image = Image.open(filepath ).convert("RGB" )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase):
    def test_inference_swin_backbone( self):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4))

    def test_inference_convnext_backbone( self):
        '''simple docstring'''
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4))
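The inference pattern these slow tests verify, as a standalone sketch (checkpoint name taken from the tests; the image path is a placeholder):

import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
image = Image.open("scene.jpg").convert("RGB")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, num_labels, 512, 512)
pred_map = logits.argmax(dim=1)  # per-pixel class ids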
| 500 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
'''simple docstring'''
    def __init__(self , parent , d_model=16 , batch_size=13 , prediction_length=7 , context_length=14 , label_length=10 , cardinality=19 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=25 , autocorrelation_factor=5 , ) -> List[str]:
        """simple docstring"""
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self ) -> Any:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self , config ) -> List[Any]:
        """simple docstring"""
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
            '''past_values''': past_values,
            '''static_categorical_features''': static_categorical_features,
            '''past_time_features''': past_time_features,
            '''past_observed_mask''': past_observed_mask,
            '''future_time_features''': future_time_features,
            '''future_values''': future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self ) -> Any:
        """simple docstring"""
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self ) -> Optional[Any]:
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self , config , inputs_dict ) -> Union[str, Any]:
        """simple docstring"""
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs , feature , _ , _ , _ = model.create_network_inputs(**inputs_dict )
        seasonal_input , trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_2 = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self ) -> Union[str, Any]:
        """simple docstring"""
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
    def test_config(self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_save_load_strict(self ) -> Dict:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info['''missing_keys'''] , [] )
    def test_encoder_decoder_model_standalone(self ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
@unittest.skip(reason='''Model has no tokens embeddings''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
    def test_model_main_input_name(self ) -> Union[str, Any]:
        """simple docstring"""
        model_signature = inspect.signature(getattr(AutoformerModel , '''forward''' ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
    def test_forward_signature(self ) -> Optional[int]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                '''past_values''',
                '''past_time_features''',
                '''past_observed_mask''',
                '''static_categorical_features''',
                '''static_real_features''',
                '''future_values''',
                '''future_time_features''',
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('''future_observed_mask''' )
            expected_arg_names.extend(
                [
                    '''decoder_attention_mask''',
                    '''head_mask''',
                    '''decoder_head_mask''',
                    '''cross_attn_head_mask''',
                    '''encoder_outputs''',
                    '''past_key_values''',
                    '''output_hidden_states''',
                    '''output_attentions''',
                    '''use_cache''',
                    '''return_dict''',
                ] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
    def test_attention_outputs(self ) -> Optional[int]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , '''seq_length''' , None )
        decoder_seq_length = getattr(self.model_tester , '''decoder_seq_length''' , seq_len )
        encoder_seq_length = getattr(self.model_tester , '''encoder_seq_length''' , seq_len )
        d_model = getattr(self.model_tester , '''d_model''' , None )
        num_attention_heads = getattr(self.model_tester , '''num_attention_heads''' , None )
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs )
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple) )
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple) )
            self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 2 , len(outputs ) )
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self ) -> Optional[Any]:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def prepare_batch(__A="train-batch.pt"):
"""simple docstring"""
_a = hf_hub_download(repo_id='''hf-internal-testing/tourism-monthly-batch''' , filename=__A , repo_type='''dataset''')
_a = torch.load(__A , map_location=__A)
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head(self ) -> Optional[int]:
        """simple docstring"""
        model = AutoformerModel.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , future_values=batch['''future_values'''] , future_time_features=batch['''future_time_features'''] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )

    def test_inference_head(self ) -> Any:
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(torch_device )
        batch = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            output = model(
                past_values=batch['''past_values'''] , past_time_features=batch['''past_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , static_categorical_features=batch['''static_categorical_features'''] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )

    def test_seq_to_seq_generation(self ) -> Tuple:
        """simple docstring"""
        model = AutoformerForPrediction.from_pretrained('''huggingface/autoformer-tourism-monthly''' ).to(torch_device )
        batch = prepare_batch('''val-batch.pt''' )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['''static_categorical_features'''] , past_time_features=batch['''past_time_features'''] , past_values=batch['''past_values'''] , future_time_features=batch['''future_time_features'''] , past_observed_mask=batch['''past_observed_mask'''] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1E-1 ) )
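The probabilistic forecasting loop the generation test above checks, sketched end to end (reuses `prepare_batch` defined above; the checkpoint name comes from the tests):

import torch
from transformers import AutoformerForPrediction

model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
batch = prepare_batch("val-batch.pt")
with torch.no_grad():
    outputs = model.generate(
        static_categorical_features=batch["static_categorical_features"],
        past_time_features=batch["past_time_features"],
        past_values=batch["past_values"],
        future_time_features=batch["future_time_features"],
        past_observed_mask=batch["past_observed_mask"],
    )
# sequences: (batch, num_parallel_samples, prediction_length); averaging the
# samples gives a point forecast, as the test does before comparing values.
point_forecast = outputs.sequences.mean(dim=1)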
| 11 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n'''
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    '''simple docstring'''
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self ,prior ,image_encoder ,image_processor ,scheduler ,renderer ,):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            prior=prior ,image_encoder=image_encoder ,image_processor=image_processor ,scheduler=scheduler ,renderer=renderer ,)
    def prepare_latents( self ,shape ,dtype ,device ,generator ,latents ,scheduler ):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape ,generator=generator ,device=device ,dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self ,gpu_id=0 ):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(F"""cuda:{gpu_id}""" )
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model ,device )
    @property
    def _execution_device( self ):
        '''simple docstring'''
        if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder ,"""_hf_hook""" ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module ,"""_hf_hook""" )
                and hasattr(module._hf_hook ,"""execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    def _encode_image( self ,image ,device ,num_images_per_prompt ,do_classifier_free_guidance ,):
        '''simple docstring'''
        if isinstance(image ,list ) and isinstance(image[0] ,torch.Tensor ):
            image = torch.cat(image ,axis=0 ) if image[0].ndim == 4 else torch.stack(image ,axis=0 )
        if not isinstance(image ,torch.Tensor ):
            image = self.image_processor(image ,return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype ,device=device )
        image_embeds = self.image_encoder(image )["""last_hidden_state"""]
        image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt ,dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt = 1,
        num_inference_steps = 25,
        generator = None,
        latents = None,
        guidance_scale = 4.0,
        frame_size = 64,
        output_type = "pil",
        return_dict = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier-free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
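# A minimal usage sketch for the img2img pipeline above. Loading via
# DiffusionPipeline.from_pretrained, the checkpoint id "openai/shap-e-img2img"
# and the image URL are assumptions based on diffusers conventions, not
# guaranteed by this file.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline
    from diffusers.utils import load_image

    pipe = DiffusionPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16).to("cuda")
    init_image = load_image(
        "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"  # illustrative URL
    )
    frames = pipe(init_image, guidance_scale=3.0, num_inference_steps=64, frame_size=64).images[0]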
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
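# For context: the parameterized test above drives RoPE scaling purely through
# the config. A minimal sketch of enabling it outside the test harness; the
# {"type", "factor"} dict format is taken from the test itself, not from any
# other documented API.
if __name__ == "__main__":
    config = OpenLlamaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
    config.rope_scaling = {"type": "linear", "factor": 10.0}
    model = OpenLlamaModel(config)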
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: suffix the word with a counter until it no longer collides
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the counter, otherwise the loop could never terminate
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
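# A small round-trip sketch of the namer above; the subclass, its prefix and
# its defaults are illustrative values, not part of the original module.
if __name__ == "__main__":

    class RunNamer(TrialShortNamer):
        PREFIX = "run"
        DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}

    name = RunNamer.shortname({"learning_rate": 1e-4, "batch_size": 32})
    print(name)                       # "run_lr0.0001" -- batch_size stays at its default
    print(RunNamer.parse_repr(name))  # {"learning_rate": 0.0001, "batch_size": 32}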
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
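# The integration test above compares a 3x3 slice of the hidden states against
# hard-coded reference values. A sketch of how such a slice is typically
# produced once, against a trusted checkpoint, before being pasted into the test:
if __name__ == "__main__":
    model = FlaxAlbertModel.from_pretrained("albert-base-v2")
    input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    attention_mask = np.ones_like(input_ids)
    output = model(input_ids, attention_mask=attention_mask)[0]
    print(np.asarray(output[:, 1:4, 1:4]))  # paste into the test's expected_slice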
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] when they violate the direction (1=ascending, 0=descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence in array[low : low + length] into a sorted one."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low : low + length] in the given direction; length must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
    print(*unsorted, sep=", ")
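# Note: bitonic sort only handles sequences whose length is a power of two; a
# quick guard before calling bitonic_sort (the helper name below is ours):
def _is_power_of_two(n: int) -> bool:
    return n > 0 and n & (n - 1) == 0


if __name__ == "__main__":
    data = [12, 42, -21, 17, 23, 18, 16, -23]
    assert _is_power_of_two(len(data)), "bitonic sort needs a power-of-two length"
    bitonic_sort(data, 0, len(data), 1)
    print(data)  # [-23, -21, 12, 16, 17, 18, 23, 42]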
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Issuer networks start with 34, 35, 37, 4, 5 or 6."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_values = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_values, rtol=1e-1))
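# For intuition, Autoformer's decomposition_layer (exercised in
# check_encoder_decoder_model_standalone above) splits a series into seasonal
# and trend parts via a moving average. A minimal, self-contained sketch of
# that idea -- the kernel size and edge padding here are illustrative, not the
# model's exact implementation:
def _decompose_by_moving_average(x, kernel_size=25):
    # x: (batch, time, features)
    import torch.nn.functional as F

    pad_front = (kernel_size - 1) // 2
    pad_end = kernel_size - 1 - pad_front
    front = x[:, :1, :].repeat(1, pad_front, 1)   # repeat the first step to pad the left edge
    end = x[:, -1:, :].repeat(1, pad_end, 1)      # repeat the last step to pad the right edge
    padded = torch.cat([front, x, end], dim=1)
    trend = F.avg_pool1d(padded.permute(0, 2, 1), kernel_size, stride=1).permute(0, 2, 1)
    seasonal = x - trend
    return seasonal, trend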
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
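# A minimal usage sketch: the checkpoint's config class selects the concrete
# Flax model through the mappings above. For a base checkpoint like this one
# the classification head is freshly initialized (transformers warns about it).
if __name__ == "__main__":
    model = FlaxAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
    print(type(model).__name__)  # FlaxDistilBertForSequenceClassification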
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase (__A = "laptop"):
"""simple docstring"""
_a = F'''https://www.amazon.in/laptop/s?k={product}'''
_a = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
_a = BeautifulSoup(requests.get(__A , headers=__A).text)
# Initialize a Pandas dataframe with the column titles
_a = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
])
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''}) , ):
try:
_a = item.ha.text
_a = '''https://www.amazon.in/''' + item.ha.a['''href''']
_a = item.find('''span''' , attrs={'''class''': '''a-offscreen'''}).text
try:
_a = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''}).text
except AttributeError:
_a = '''Not available'''
try:
_a = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
)
except AttributeError:
_a = ''''''
try:
_a = float(
(
(
float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
- float(product_price.strip('''₹''').replace(''',''' , ''''''))
)
/ float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
)
* 100)
except ValueError:
_a = float('''nan''')
except AttributeError:
pass
_a = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_a = ''' '''
_a = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowercase_ = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
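# The "Discount" column above is the percentage drop from MRP; the same
# computation isolated as a small helper for clarity (the function name is ours):
def discount_percent(mrp: float, price: float) -> float:
    return (mrp - price) / mrp * 100


if __name__ == "__main__":
    print(discount_percent(1000.0, 750.0))  # 25.0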
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # By Bolzano's theorem, a root is only guaranteed where the function changes sign
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
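    # Sanity check (illustrative): the positive root of 10 - x**2 on [0, 6] is
    # sqrt(10) ~= 3.1623, and the bracket shrinks below 0.01 before the loop
    # stops, so the returned point is within 0.01 of the true root.
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01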
| 514 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    """simple docstring"""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """simple docstring"""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Nearly parallel vectors: fall back to plain linear interpolation.
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    """simple docstring"""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    """simple docstring"""
    for param in model.parameters():
        param.requires_grad = value
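# A minimal, self-contained check of `slerp` (illustrative only; made-up unit
# vectors rather than pipeline tensors). Interpolation must return the
# endpoints at t=0 and t=1, and stay on the unit circle in between.
if __name__ == "__main__":
    _v0 = np.array([1.0, 0.0])
    _v1 = np.array([0.0, 1.0])
    assert np.allclose(slerp(0.0, _v0, _v1), _v0)
    assert np.allclose(slerp(1.0, _v0, _v1), _v1)
    assert np.isclose(np.linalg.norm(slerp(0.5, _v0, _v1)), 1.0)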
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    '''simple docstring'''
def __init__(self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , )
_a = (
feature_extractor.size
if isinstance(feature_extractor.size , A )
else feature_extractor.size['''shortest_edge''']
)
_a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , A )
set_requires_grad(self.clip_model , A )
def a__ (self , A = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
self.enable_attention_slicing(A )
def a__ (self ) -> int:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self ) -> str:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self , A , A , A ) -> Optional[Any]:
"""simple docstring"""
_a = min(int(num_inference_steps * strength ) , A )
_a = max(num_inference_steps - init_timestep , 0 )
_a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def a__ (self , A , A , A , A , A , A=None ) -> List[str]:
"""simple docstring"""
if not isinstance(A , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(A )}''' )
_a = image.to(device=A , dtype=A )
if isinstance(A , A ):
_a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
_a = torch.cat(A , dim=0 )
else:
_a = self.vae.encode(A ).latent_dist.sample(A )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 0.18215 * init_latents
_a = init_latents.repeat_interleave(A , dim=0 )
_a = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A )
# get latents
_a = self.scheduler.add_noise(A , A , A )
_a = init_latents
return latents
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def a__ (self , A , A ) -> List[Any]:
"""simple docstring"""
_a = self.feature_extractor.preprocess(A )
_a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = image_embeddings_clip.repeat_interleave(A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def a__ (self , A , A , A , A , A , A , A , ) -> Union[str, Any]:
"""simple docstring"""
_a = latents.detach().requires_grad_()
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_a = self.scheduler.alphas_cumprod[timestep]
_a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_a = torch.sqrt(A )
_a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , A ):
_a = self.scheduler.sigmas[index]
_a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 1 / 0.18215 * sample
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = transforms.Resize(self.feature_extractor_size )(A )
_a = self.normalize(A ).to(latents.dtype )
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = spherical_dist_loss(A , A ).mean() * clip_guidance_scale
_a = -torch.autograd.grad(A , A )[0]
if isinstance(self.scheduler , A ):
_a = latents.detach() + grads * (sigma**2)
_a = noise_pred_original
else:
_a = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self , A , A , A = None , A = None , A = 512 , A = 512 , A = 0.6 , A = 50 , A = 7.5 , A = 1 , A = 0.0 , A = 100 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> str:
"""simple docstring"""
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
# get prompt text embeddings for content and style
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(A , A , self.device )
_a = timesteps[:1].repeat(A )
# Preprocess image
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = slerp(A , A , A )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(A , A )
_a = self.get_clip_image_embeddings(A , A )
_a = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_a = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(A , A , A , **A ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_a = 1 / 0.18215 * latents
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 11 | 0 |
from __future__ import annotations


def encode(plain: str) -> list[int]:
    '''simple docstring'''
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    '''simple docstring'''
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    '''simple docstring'''
    encoded = encode(input('''-> ''').strip().lower())
    print('''Encoded: ''', encoded)
    print('''Decoded:''', decode(encoded))


if __name__ == "__main__":
    main()
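    # Round-trip sanity check of the a = 1 ... z = 26 cipher above (illustrative).
    assert encode("abc") == [1, 2, 3]
    assert decode(encode("hello")) == "hello"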
| 393 |
'''simple docstring'''
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seqaseq = False

    def setUp(self):
        """simple docstring"""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text

    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
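# Worked example of the tiny BPE vocabulary above (illustrative): "react"
# starts as the characters r e a c t; the only applicable merge is
# "r e" -> "re", which leaves the tokens "re@@ a@@ c@@ t". "readapt"
# additionally triggers "a p", "ap t</w>", "a d" and "ad apt</w>",
# collapsing to "re@@ adapt" — exactly the expected tokens asserted in
# test_full_tokenizer.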
| 11 | 0 |
"""simple docstring"""
import os
import string
import sys
_A = 1 << 8
_A = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
_A = KEYMAP["""up"""]
_A = KEYMAP["""left"""]
if sys.platform == "win32":
_A = []
_A = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
_A = ord(str(i))
def get_raw_chars():
    """simple docstring"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["""esc"""])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """simple docstring"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 182 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase_ = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """simple docstring"""
    # 1. rename layers_{x} to the block/{x}/layer layout used by HF T5
    keys = list(s_dict.keys())
    for key in keys:
        layer_regex = r'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_regex, key):
            new_key = re.sub(r'''layers_(\d+)''', r'''block/\1/layer''', new_key)

        encoder_decoder_regex = r'''(encoder|decoder)\/'''
        if re.match(encoder_decoder_regex, key):
            groups = re.match(encoder_decoder_regex, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r'''/mlp/''', r'''/1/mlp/''', new_key)
                new_key = re.sub(r'''/pre_mlp_layer_norm/''', r'''/1/layer_norm/''', new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r'''/mlp/''', r'''/2/mlp/''', new_key)
                new_key = re.sub(r'''/pre_mlp_layer_norm/''', r'''/2/layer_norm/''', new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f'''{key} -> {new_key}''')
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''] = s_dict[
            '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace('''expert/''', f'''experts/expert_{idx}/''')] = expert_weights[idx]
                print(f'''{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}''')
            s_dict.pop(key)

    return s_dict
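# Worked example of the renaming above (illustrative key, not from a real
# checkpoint): "encoder/layers_3/mlp/wi/kernel" first becomes
# "encoder/block/3/layer/mlp/wi/kernel" via the layers_(\d+) substitution,
# and the encoder branch then rewrites "/mlp/" to "/1/mlp/", giving
# "encoder/block/3/layer/1/mlp/wi/kernel".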
lowercase_ = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """simple docstring"""
    import regex as re

    with open(gin_file, '''r''') as f:
        raw_gin = f.read()

    regex_match = re.findall(r'''(.*) = ([0-9.]*)''', raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '''.''' in value else int(value)

    activation = re.findall(r'''(.*activations) = \(\'(.*)\',\)''', raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    """simple docstring"""
    print(f'''Loading flax weights from : {flax_checkpoint_path}''')
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params, sep='''/''')
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep='''/''')

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 11 | 0 |
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
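# Instantiating the shim above warns once and then behaves exactly like
# DeformableDetrImageProcessor (illustrative):
#
#     feature_extractor = DeformableDetrFeatureExtractor()  # emits FutureWarning
#     # ... then use it as you would the image processor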
| 53 |
'''simple docstring'''
def decimal_isolate(number, digit_amount):
    """simple docstring"""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
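    # The function keeps only the fractional part, optionally rounded
    # (illustrative): 1.53 -> 0.53 when shown to two digits.
    assert abs(decimal_isolate(1.53, 2) - 0.53) < 1e-9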
| 11 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class InstructBlipVisionConfig(PretrainedConfig):
snake_case__ : Dict = 'instructblip_vision_model'
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple=1_4_0_8 , SCREAMING_SNAKE_CASE__ : List[str]=6_1_4_4 , SCREAMING_SNAKE_CASE__ : str=3_9 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_2_4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_4 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=1E-6 , SCREAMING_SNAKE_CASE__ : List[str]=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1E-10 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
a_ : List[str] = hidden_size
a_ : str = intermediate_size
a_ : str = num_hidden_layers
a_ : Dict = num_attention_heads
a_ : Union[str, Any] = patch_size
a_ : Union[str, Any] = image_size
a_ : List[Any] = initializer_range
a_ : int = attention_dropout
a_ : Union[str, Any] = layer_norm_eps
a_ : Tuple = hidden_act
a_ : List[str] = qkv_bias
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
a_ , a_ : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
a_ : Union[str, Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class InstructBlipQFormerConfig(PretrainedConfig):
snake_case__ : Dict = 'instructblip_qformer'
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Any=7_6_8 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Dict=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=5_1_2 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1E-12 , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : str="absolute" , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[Any]=1_4_0_8 , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> Any:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = vocab_size
a_ : str = hidden_size
a_ : Optional[int] = num_hidden_layers
a_ : Optional[Any] = num_attention_heads
a_ : int = hidden_act
a_ : List[Any] = intermediate_size
a_ : str = hidden_dropout_prob
a_ : Any = attention_probs_dropout_prob
a_ : List[str] = max_position_embeddings
a_ : Optional[int] = initializer_range
a_ : Optional[Any] = layer_norm_eps
a_ : int = position_embedding_type
a_ : Dict = cross_attention_frequency
a_ : Optional[Any] = encoder_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
a_ , a_ : Tuple = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
a_ : int = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class InstructBlipConfig(PretrainedConfig):
snake_case__ : Union[str, Any] = 'instructblip'
snake_case__ : str = True
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=3_2 , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict:
super().__init__(**SCREAMING_SNAKE_CASE__ )
if vision_config is None:
a_ : List[Any] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
a_ : Optional[int] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
a_ : Tuple = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
a_ : List[Any] = InstructBlipVisionConfig(**SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = InstructBlipQFormerConfig(**SCREAMING_SNAKE_CASE__ )
a_ : Dict = text_config['model_type'] if 'model_type' in text_config else 'opt'
a_ : List[Any] = CONFIG_MAPPING[text_model_type](**SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = self.text_config.tie_word_embeddings
a_ : List[Any] = self.text_config.is_encoder_decoder
a_ : str = num_query_tokens
a_ : List[Any] = self.vision_config.hidden_size
a_ : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
a_ : Any = 1.0
a_ : Union[str, Any] = 0.02
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> List[str]:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **SCREAMING_SNAKE_CASE__ , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
a_ : List[Any] = copy.deepcopy(self.__dict__ )
a_ : Optional[Any] = self.vision_config.to_dict()
a_ : Union[str, Any] = self.qformer_config.to_dict()
a_ : str = self.text_config.to_dict()
a_ : Any = self.__class__.model_type
return output
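# Typical composition of the three configs above, following the upstream
# InstructBLIP API (illustrative; the text config defaults to OPT when no
# model_type is given):
#
#     config = InstructBlipConfig(
#         vision_config=InstructBlipVisionConfig().to_dict(),
#         qformer_config=InstructBlipQFormerConfig().to_dict(),
#     )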
| 570 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowercase_ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
lowercase_ = 10
lowercase_ = 256
def get_min_hash(tokens):
    """simple docstring"""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code):
    """simple docstring"""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    '''simple docstring'''

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f'''Duplicate key {code_key}''')
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, '''w''') as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """simple docstring"""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content''']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    """simple docstring"""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10_000), chunksize=100, ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """simple docstring"""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code_a, code_b):
    """simple docstring"""
    tokens_a = get_tokens(code_a)
    tokens_b = get_tokens(code_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
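# Worked example of the Jaccard similarity above (illustrative): for token
# sets {a, b, c} and {b, c, d}, the intersection has 2 tokens and the union
# has 4, so the similarity is 2 / 4 = 0.5.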
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """simple docstring"""
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a['''base_index''']]['''content''']
        for element_b in extremes:
            code_b = _shared_dataset[element_b['''base_index''']]['''content''']
            if jaccard_similarity(code_a, code_b) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """simple docstring"""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f, cluster_list, ), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """simple docstring"""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['''is_extreme'''] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element['''copies'''] = extreme_dict[element['''base_index''']]['''copies''']

    print(f'''Original dataset size: {len(dataset)}''')
    print(f'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(f'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(f'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(f'''Filtered dataset size: {len(ds_filter)}''')

    return ds_filter, duplicate_clusters
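# Typical entry point (illustrative; `ds` is a `datasets.Dataset` with a
# "content" column, e.g. scraped source files):
#
#     ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)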
| 11 | 0 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'})
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'}, )
    max_query_length: int = field(
        default=64, metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    version_2_with_negative: bool = field(
        default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'})
    null_score_diff_threshold: float = field(
        default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'})
    n_best_size: int = field(
        default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'})
    lang_id: int = field(
        default=0, metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        }, )
    threads: int = field(default=1, metadata={'help': 'multiple threads for converting example to features'})


class Split(Enum):
    train = 'train'
    dev = 'dev'


class SquadDataset(Dataset):
    '''simple docstring'''

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self, args, tokenizer, limit_length=None, mode=Split.train, is_language_sensitive=False, cache_dir=None, dataset_format="pt", ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}", )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")

    def __len__(self):
        '''simple docstring'''
        return len(self.features)

    def __getitem__(self, i):
        '''simple docstring'''
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 500 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    '''simple docstring'''

    def __init__(self):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    '''simple docstring'''

    def pre_forward(self, module, *args, **kwargs):
        """simple docstring"""
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    '''simple docstring'''

    def post_forward(self, module, output):
        """simple docstring"""
        return output + 1
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
self.assertEqual(test_model._hf_hook , A )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(A , A )
add_hook_to_module(A , A , append=A )
self.assertEqual(isinstance(test_model._hf_hook , A ) , A )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(A , '''_old_forward''' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] )
remove_hook_from_module(A )
self.assertFalse(hasattr(A , '''_hf_hook''' ) )
self.assertFalse(hasattr(A , '''_old_forward''' ) )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(x + 1 )
_a = test_model(x + 2 )
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PreForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , A , atol=1E-5 )
def a__ (self ) -> str:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(A , A )
_a = test_model(A )
assert torch.allclose(A , output + 2 , atol=1E-5 )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(A )
_a = PostForwardHook()
add_hook_to_module(A , A )
_a = test_model(A )
self.assertTrue(torch.allclose(A , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a = True
_a = test_model(A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(A , AlignDevicesHook(io_same_device=A ) )
_a = torch.randn(2 , 3 ).to(0 )
_a = model(A )
self.assertEqual(output.device , torch.device(0 ) )
def a__ (self ) -> List[str]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(hook_kwargs['''execution_device'''] )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
_a = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**A ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(A , execution_device=A , offload=A )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(A , execution_device=A , offload=A , offload_buffers=A )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
def a__ (self ) -> Any:
"""simple docstring"""
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(A )
self.assertEqual(model.batchnorm.running_mean.device , A )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
A , execution_device=A , offload=A , weights_map=model.state_dict() , offload_buffers=A , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) )
_a = torch.randn(2 , 3 )
_a = model(A )
self.assertEqual(output.device , A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(A )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) )
self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
| 11 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    '''simple docstring'''

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
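# Illustrative reading of the cosine schedule above: with
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, each step's beta is
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta, so
# the per-step noise grows toward the end of the diffusion process.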
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # The UnCLIP scheduler does not rescale model inputs.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
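In a pipeline this scheduler drives a denoising loop; a hedged sketch of bare usage (the `unet` call stands for a hypothetical noise-prediction model, and tensor shapes are illustrative):

import torch

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25, device="cpu")

sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for i, t in enumerate(scheduler.timesteps):
    # The UnCLIP pipelines pass the next coarse timestep explicitly.
    prev = scheduler.timesteps[i + 1] if i + 1 < len(scheduler.timesteps) else None
    model_output = unet(sample, t)  # hypothetical epsilon-predicting model
    sample = scheduler.step(model_output, t, sample, prev_timestep=prev).prev_sample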
| 36 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Requires higher tolerance, as the model tends to suffer from float16 precision loss
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
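For orientation, a sketch of how the dummy inputs above would drive the pipeline directly, which is essentially what the common tester mixin does internally (assumes it runs inside a test method of this class):

pipe = IFInpaintingSuperResolutionPipeline(**self.get_dummy_components())
inputs = self.get_dummy_inputs("cpu")
result = pipe(**inputs)  # runs 2 denoising steps on the dummy tensors; output_type="numpy"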
| 11 | 0 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check all submodules are properly registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
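As a quick illustration of the backend detection this script relies on, a hypothetical spot-check (run separately, e.g. in a REPL, not as part of the script itself):

assert find_backend("if not is_torch_available():") == "torch"
assert find_backend("import os") is None  # not a guarded-import line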
| 593 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return from a sequence of
        state, action and return.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
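A compact sketch of the shape contract these tests rely on, runnable standalone with a randomly initialized model (tensor sizes are illustrative):

import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=6, max_length=11)
model = DecisionTransformerModel(config).eval()

batch, seq = 2, 5
out = model(
    states=torch.randn(batch, seq, config.state_dim),
    actions=torch.randn(batch, seq, config.act_dim),
    rewards=torch.randn(batch, seq, 1),
    returns_to_go=torch.randn(batch, seq, 1),
    timesteps=torch.zeros(batch, seq, dtype=torch.long),
    attention_mask=torch.ones(batch, seq, dtype=torch.long),
)
# One prediction per modality, interleaved as (state, return, action) triples internally.
assert out.action_preds.shape == (batch, seq, config.act_dim)
assert out.last_hidden_state.shape == (batch, seq * 3, config.hidden_size)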
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two data points."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """
    :param dataset: set containing the vectors.
    :param value_array: vector(s) we want the nearest dataset vector for.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two data points."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
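An illustrative run (values chosen arbitrarily):

dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
value_array = np.array([[0.0, 0.0, 0.9]])
print(similarity_search(dataset, value_array))  # [[[0.0, 0.0, 0.0], 0.9]]
print(cosine_similarity(np.array([1.0, 2.0]), np.array([6.0, 32.0])))  # ≈0.96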
from __future__ import annotations


def all_unique(collection: list) -> bool:
    """Return True when every element of the collection appears exactly once."""
    return len(set(collection)) == len(collection)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
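Example usage, with the function as named above:

print(all_unique([1, 2, 3]))       # True  (no repeats)
print(all_unique(["a", "b", "a"]))  # False (duplicate "a")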
| 11 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """Factory function used to instantiate the serving server from provided command line arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers-cli."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input and optionally return the corresponding token ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids into readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have an empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
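With the server running, e.g. via `transformers-cli serve --task feature-extraction --port 8888` (model name and host are illustrative), the /tokenize route above can be exercised as follows; `requests` is an extra dependency, and since the route parameters use Body(embed=True), the JSON keys match the parameter names:

import requests

resp = requests.post(
    "http://localhost:8888/tokenize",
    json={"text_input": "Hello world", "return_ids": True},
)
print(resp.json())  # {"tokens": [...], "tokens_ids": [...]}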
| 261 |
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if item is in the sorted list a_list, using recursive bisection."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
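A couple of quick checks (the input list must already be sorted, as the halving step above assumes it):

assert binary_search([1, 3, 5, 7], 5)
assert not binary_search([1, 3, 5, 7], 4)
assert not binary_search([], 1)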
| 11 | 0 |