import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators, so fall back to the global seed there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)

        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
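# A minimal usage sketch mirroring the slow tests above (assumes a CUDA device and network
# access; the checkpoint name comes straight from test_dit_256):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])  # ImageNet label -> class id
#   image = pipe(class_ids, num_inference_steps=25, output_type="np").images[0]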
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    # Standard COCO test image of two cats, used across HF conversion scripts.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    # Reference logits (first five entries) produced by the original checkpoints.
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
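# For illustration, tracing one (hypothetical) checkpoint key through create_rename_keys:
# "network.0.0.dwconv.weight" first becomes "network.0.0.depth_wise_conv.weight" via the
# ".dwconv" rule, then, since ls[2] ("0") is a digit, it is remapped to
# "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight".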
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
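# Example invocation (script and checkpoint paths are illustrative):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth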
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """Return all diffusers versions published on PyPI, sorted oldest to newest."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Create the cache directory for dynamic modules (with an __init__.py) and add it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    """Create a dynamic module `name` inside the cache directory for modules."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Get the list of modules that are relatively imported in a module file."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
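# For example, a module containing the (hypothetical) lines
#     import .pipeline_helper
#     from .scheduling_utils import SchedulerMixin
# yields ["pipeline_helper", "scheduling_utils"] (set order is not guaranteed).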
def get_relative_import_files(module_file):
    """Get all files needed by a module, following relative imports transitively until a fixed point."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that the current Python environment contains all the top-level libraries imported in a file."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import a module from the dynamic-modules cache and extract a class from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """Retrieve the single `DiffusionPipeline` subclass defined in `loaded_module` (outside diffusers itself)."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Download a module file from a repo (or grab it locally) and place it in the dynamic-modules cache."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Extract a class from a module file, present in a local folder or in a repository of a model."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
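# A minimal usage sketch of the loader above (repo and file names are illustrative):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "some-user/custom-pipeline-repo",  # Hub repo, community pipeline name, or local dir
#       "pipeline_custom.py",              # module file inside it
#       class_name=None,                   # None -> auto-detect via find_pipeline_class()
#   )
#
# With class_name=None, find_pipeline_class() returns the single DiffusionPipeline
# subclass defined in the module and raises if there is more than one.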
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        # m_component maps each node to the index of the component it belongs to.
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Add an edge in the format [first, second, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Follow component pointers until the representative of u_node's component is found."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagate the representative of each component to every node."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Attach the smaller of the two components to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Perform Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    # Remember the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    # Placeholder for a doctest-based check of boruvka().
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
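# A quick usage sketch of the Graph class above (weights chosen arbitrarily):
#
#   g = Graph(3)
#   g.add_edge(0, 1, 5)
#   g.add_edge(1, 2, 10)
#   g.add_edge(2, 0, 1)
#   g.boruvka()  # keeps edges [2 - 0] and [0 - 1]; total MST weight: 6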
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
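# Tracing test_full_tokenizer: "readapt" starts as the characters r e a d a p t</w>;
# the merges above then apply in rank order ("a p" -> "ap", "ap t</w>" -> "apt</w>",
# "r e" -> "re", "a d" -> "ad", "ad apt</w>" -> "adapt</w>"), leaving exactly the two
# symbols "re@@" and "adapt" that the test asserts.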
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    r"""
    Configuration class for a Conditional DETR model. Instantiating it with the defaults yields a
    configuration close to microsoft/conditional-detr-resnet-50.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
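# A minimal usage sketch (all unspecified values fall back to the defaults above):
#
#   from transformers import ConditionalDetrConfig, ConditionalDetrModel
#
#   config = ConditionalDetrConfig(num_queries=100)  # override one field
#   model = ConditionalDetrModel(config)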
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
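# Running this module prints the completions of "de": ('depart ', 'detergent ', 'deer ', 'deal ').
# Each entry keeps the trailing space that _elements() emits when it reaches the END marker.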
SCREAMING_SNAKE_CASE__ : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def _a ( lowercase__ : bytes ):
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(lowercase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''.join(bin(lowercase__ )[2:].zfill(8 ) for byte in data )
SCREAMING_SNAKE_CASE__ : Any = len(lowercase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
SCREAMING_SNAKE_CASE__ : List[Any] = b'=' * ((6 - len(lowercase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowercase__ ) % 6)
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowercase__ ) , 6 ) ).encode()
+ padding
)
def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC 4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
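# Round-trip sanity check (matches Python's built-in base64 module):
#
#   base64_encode(b"Hello")     # -> b"SGVsbG8="
#   base64_decode(b"SGVsbG8=")  # -> b"Hello"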
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters pass through unchanged.
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
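# Worked example: encrypting "HELLO" with key "ABC" shifts the letters by 0, 1, 2, 0, 1
# (A=0, B=1, C=2, then the key wraps), giving "HFNLP";
# decrypt_message("ABC", "HFNLP") applies the same shifts in reverse and restores "HELLO".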
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bf16_available,
is_bnb_available,
is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fp32,
convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
    from .deepspeed import (
        DeepSpeedEngineWrapper,
        DeepSpeedOptimizerWrapper,
        DeepSpeedSchedulerWrapper,
        DummyOptim,
        DummyScheduler,
        HfDeepSpeedConfig,
    )
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
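# Note on the pattern above: replacing sys.modules[__name__] with a _LazyModule defers the
# heavy torch-dependent imports until an attribute (e.g. TimesformerModel) is first accessed,
# so `import transformers.models.timesformer` stays cheap.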
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ (lowercase__ , unittest.TestCase ):
snake_case =KandinskyVaaPriorPipeline
snake_case =['prompt']
snake_case =['prompt', 'negative_prompt']
snake_case =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
snake_case =False
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self) -> Tuple:
return 32
@property
def __UpperCamelCase ( self) -> int:
return self.time_input_dim
@property
def __UpperCamelCase ( self) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self) -> Optional[int]:
return 100
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
return tokenizer
@property
def __UpperCamelCase ( self) -> Union[str, Any]:
torch.manual_seed(0)
a__ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase_)
@property
def __UpperCamelCase ( self) -> Tuple:
torch.manual_seed(0)
a__ ={
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
a__ =PriorTransformer(**lowercase_)
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
a__ =nn.Parameter(torch.ones(model.clip_std.shape))
return model
@property
def __UpperCamelCase ( self) -> Any:
torch.manual_seed(0)
a__ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a__ =CLIPVisionModelWithProjection(lowercase_)
return model
@property
def __UpperCamelCase ( self) -> Optional[int]:
a__ =CLIPImageProcessor(
crop_size=224 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
def __UpperCamelCase ( self) -> Any:
a__ =self.dummy_prior
a__ =self.dummy_image_encoder
a__ =self.dummy_text_encoder
a__ =self.dummy_tokenizer
a__ =self.dummy_image_processor
a__ =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
a__ ={
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def __UpperCamelCase ( self , lowercase_ , lowercase_=0) -> Tuple:
if str(lowercase_).startswith('mps'):
a__ =torch.manual_seed(lowercase_)
else:
a__ =torch.Generator(device=lowercase_).manual_seed(lowercase_)
a__ ={
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self) -> int:
a__ ='cpu'
a__ =self.get_dummy_components()
a__ =self.pipeline_class(**lowercase_)
a__ =pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
a__ =pipe(**self.get_dummy_inputs(lowercase_))
a__ =output.image_embeds
a__ =pipe(
**self.get_dummy_inputs(lowercase_) , return_dict=lowercase_ , )[0]
a__ =image[0, -10:]
a__ =image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a__ =np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def __UpperCamelCase ( self) -> List[Any]:
a__ =torch_device == 'cpu'
a__ =True
a__ =False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def __UpperCamelCase ( self) -> Optional[int]:
a__ =torch_device == 'cpu'
a__ =False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
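# Note on the pattern in this test class (shared across the diffusers test
# suite): the hard-coded expected_slice pins the last few CPU output values for
# a fixed seed, so any numerical drift beyond the 1e-2 tolerance fails fast.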
| 20 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="m2m100" , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE=8 , **SCREAMING_SNAKE_CASE , ) -> None:
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : List[Any] = language_codes
_lowerCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCamelCase : str = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCamelCase : Optional[Any] = kwargs.get("""additional_special_tokens""" , [])
kwargs["additional_special_tokens"] += [
self.get_lang_token(SCREAMING_SNAKE_CASE)
for lang_code in fairseq_language_code
if self.get_lang_token(SCREAMING_SNAKE_CASE) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=SCREAMING_SNAKE_CASE , tgt_lang=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , language_codes=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_lowerCamelCase : Union[str, Any] = vocab_file
_lowerCamelCase : str = load_json(SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = {v: k for k, v in self.encoder.items()}
_lowerCamelCase : Optional[int] = spm_file
_lowerCamelCase : Union[str, Any] = load_spm(SCREAMING_SNAKE_CASE , self.sp_model_kwargs)
_lowerCamelCase : Optional[int] = len(self.encoder)
_lowerCamelCase : Dict = {
self.get_lang_token(SCREAMING_SNAKE_CASE): self.encoder_size + i for i, lang_code in enumerate(SCREAMING_SNAKE_CASE)
}
_lowerCamelCase : List[Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(SCREAMING_SNAKE_CASE)}
_lowerCamelCase : str = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCamelCase : List[str] = src_lang if src_lang is not None else """en"""
_lowerCamelCase : List[str] = tgt_lang
_lowerCamelCase : List[str] = self.get_lang_id(self._src_lang)
self.set_src_lang_special_tokens(self._src_lang)
_lowerCamelCase : Dict = num_madeup_words
    @property
    def vocab_size( self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)
    @property
    def src_lang( self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder[self.unk_token])
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(SCREAMING_SNAKE_CASE , self.unk_token)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Dict:
_lowerCamelCase : Tuple = []
_lowerCamelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE) + token
_lowerCamelCase : Tuple = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE)
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE)
return out_string.strip()
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Dict:
_lowerCamelCase : str = self.__dict__.copy()
_lowerCamelCase : List[Any] = None
return state
def __setstate__( self , SCREAMING_SNAKE_CASE) -> None:
_lowerCamelCase : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
_lowerCamelCase : int = {}
_lowerCamelCase : Any = load_spm(self.spm_file , self.sp_model_kwargs)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]:
_lowerCamelCase : Optional[int] = Path(SCREAMING_SNAKE_CASE)
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory')
_lowerCamelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCamelCase : Optional[Any] = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , SCREAMING_SNAKE_CASE)
if os.path.abspath(self.spm_file) != os.path.abspath(SCREAMING_SNAKE_CASE) and os.path.isfile(self.spm_file):
copyfile(self.spm_file , SCREAMING_SNAKE_CASE)
elif not os.path.isfile(self.spm_file):
with open(SCREAMING_SNAKE_CASE , """wb""") as fi:
_lowerCamelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE)
return (str(SCREAMING_SNAKE_CASE), str(SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "en" , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "ro" , **SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
_lowerCamelCase : List[Any] = src_lang
_lowerCamelCase : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang)
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
    def _build_translation_inputs( self , raw_inputs , src_lang , tgt_lang , **extra_kwargs) -> BatchEncoding:
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self) -> Any:
        self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode( self) -> Tuple:
        self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens( self , src_lang) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang) -> str:
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ):
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ):
    """simple docstring"""
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json( data , path : str ):
    """simple docstring"""
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
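# Illustrative usage sketch (not part of the original module): the class above
# mirrors the published Hugging Face M2M100 tokenizer, which is normally
# driven like this (the checkpoint id and language codes are the real defaults):
#
#     from transformers import M2M100Tokenizer
#     tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     enc = tok("Hello world", return_tensors="pt")
#     # input_ids come out as [__en__, ...subword ids..., </s>], matching the
#     # prefix/suffix token logic in set_src_lang_special_tokens above.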
| 88 |
from manim import *
class Stage2(Scene):
    def construct( self) -> List[Any]:
a__ =Rectangle(height=0.5 , width=0.5)
a__ =Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a__ =[mem.copy() for i in range(6)]
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('CPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(4)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('GPU' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.move_to([-1, -1, 0])
self.add(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Model' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.add(lowercase_)
a__ =[]
for i, rect in enumerate(lowercase_):
rect.set_stroke(lowercase_)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
a__ =Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0)
self.add(lowercase_)
cpu_targs.append(lowercase_)
a__ =[mem.copy() for i in range(6)]
a__ =VGroup(*lowercase_).arrange(lowercase_ , buff=0)
a__ =Text('Loaded Checkpoint' , font_size=24)
a__ =Group(lowercase_ , lowercase_).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
a__ =Square(side_length=2.2)
key.move_to([-5, 2, 0])
a__ =MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase_ , lowercase_)
a__ =MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left())
a__ =MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_) , Write(lowercase_))
self.play(Write(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1))
a__ =[]
a__ =[]
for i, rect in enumerate(lowercase_):
a__ =fill.copy().set_fill(lowercase_ , opacity=0.7)
target.move_to(lowercase_)
first_animations.append(GrowFromCenter(lowercase_ , run_time=1))
a__ =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
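# Rendering note: with this file saved as e.g. stage_2.py, the standard manim
# CLI renders a low-quality preview of the scene above with:
#
#     manim -pql stage_2.py Stage2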
| 20 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint( checkpoint_path : str , pytorch_dump_folder_path : str ) -> None:
    # Load the fine-tuned state dict and move the tied LM-head weight to its new key
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = F"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
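# Hypothetical follow-up (not part of the original script): the dump folder
# only holds weights, so loading it as a model additionally assumes a matching
# GPT-2 config.json has been copied next to them:
#
#     from transformers import AutoModelForCausalLM
#     model = AutoModelForCausalLM.from_pretrained("./DialoGPT-small")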
| 89 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class lowercase_ :
snake_case =42
snake_case =42
snake_case =42
snake_case =42
@dataclass
class lowercase_ :
snake_case =42
snake_case =field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
class lowercase_ (lowercase__ ):
snake_case ='titi'
snake_case ='toto'
snake_case =42
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =BasicEnum(self.foo)
@dataclass
class lowercase_ :
snake_case ="toto"
def __UpperCamelCase ( self) -> List[str]:
a__ =MixedTypeEnum(self.foo)
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
@dataclass
class lowercase_ :
snake_case =list_field(default=[] )
snake_case =list_field(default=[1, 2, 3] )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
snake_case =field()
snake_case =field()
snake_case =field()
def __UpperCamelCase ( self) -> List[Any]:
a__ =BasicEnum(self.required_enum)
@dataclass
class lowercase_ :
snake_case =42
snake_case =field()
snake_case =None
snake_case =field(default='toto' , metadata={'help': 'help message'} )
snake_case =list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class lowercase_ :
snake_case =False
snake_case =True
snake_case =None
@dataclass
class lowercase_ :
snake_case =None
snake_case =field(default=lowercase__ , metadata={'help': 'help message'} )
snake_case =None
snake_case =list_field(default=[] )
snake_case =list_field(default=[] )
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> int:
self.assertEqual(len(a._actions) , len(b._actions))
for x, y in zip(a._actions , b._actions):
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
a__ ={k: v for k, v in vars(lowercase_).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , lowercase_) and yy.get('choices' , lowercase_):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](lowercase_) , yy['type'](lowercase_))
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument('--bar' , type=lowercase_ , required=lowercase_)
expected.add_argument('--baz' , type=lowercase_ , required=lowercase_)
expected.add_argument('--flag' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
self.argparsersEqual(lowercase_ , lowercase_)
a__ =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((a__) , ) =parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_)
self.assertFalse(example.flag)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
expected.add_argument('--baz' , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs='?')
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=lowercase_ , dest='baz')
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
a__ =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--no_baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', '--baz'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
a__ =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args_into_dataclasses([])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto)
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi)
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
a__ =parser.parse_args_into_dataclasses(['--foo', '42'])[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo)
def __UpperCamelCase ( self) -> List[Any]:
@dataclass
class lowercase_ :
snake_case ="toto"
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42]) , )
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(args.foo , 'toto')
a__ =parser.parse_args(['--foo', 'titi'])
self.assertEqual(args.foo , 'titi')
a__ =parser.parse_args(['--foo', '42'])
self.assertEqual(args.foo , 42)
def __UpperCamelCase ( self) -> Optional[int]:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=lowercase_)
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3]) , )
a__ =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7]))
def __UpperCamelCase ( self) -> Dict:
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , default=lowercase_ , type=lowercase_)
expected.add_argument('--bar' , default=lowercase_ , type=lowercase_ , help='help message')
expected.add_argument('--baz' , default=lowercase_ , type=lowercase_)
expected.add_argument('--ces' , nargs='+' , default=[] , type=lowercase_)
expected.add_argument('--des' , nargs='+' , default=[] , type=lowercase_)
a__ =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_)
for dataclass_type in dataclass_types:
a__ =HfArgumentParser(lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
a__ =parser.parse_args([])
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[]))
a__ =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3]))
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=lowercase_ , required=lowercase_)
expected.add_argument('--required_str' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> str:
a__ =HfArgumentParser(lowercase_)
a__ =argparse.ArgumentParser()
expected.add_argument('--foo' , type=lowercase_ , required=lowercase_)
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto']) , choices=['titi', 'toto'] , required=lowercase_ , )
expected.add_argument('--opt' , type=lowercase_ , default=lowercase_)
expected.add_argument('--baz' , default='toto' , type=lowercase_ , help='help message')
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=lowercase_)
self.argparsersEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
a__ =parser.parse_dict(lowercase_)[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_json')
os.mkdir(lowercase_)
with open(temp_local_path + '.json' , 'w+') as f:
json.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.json'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ =HfArgumentParser(lowercase_)
a__ ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
a__ =os.path.join(lowercase_ , 'temp_yaml')
os.mkdir(lowercase_)
with open(temp_local_path + '.yaml' , 'w+') as f:
yaml.dump(lowercase_ , lowercase_)
a__ =parser.parse_yaml_file(Path(temp_local_path + '.yaml'))[0]
a__ =BasicExample(**lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> Union[str, Any]:
a__ =HfArgumentParser(lowercase_)
self.assertIsNotNone(lowercase_)
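# Minimal end-to-end sketch of the parser these tests exercise (standard
# HfArgumentParser API; the Demo dataclass is ad hoc for illustration):
#
#     @dataclass
#     class Demo:
#         foo: int
#         flag: bool = False
#
#     (demo,) = HfArgumentParser(Demo).parse_args_into_dataclasses(["--foo", "3", "--flag"])
#     assert demo == Demo(foo=3, flag=True)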
| 20 | 0 |
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change over [a, b] guarantees a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError('''Wrong space!''' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
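# Convergence note: each pass halves the bracket, so reaching the 0.01
# tolerance from an initial bracket [a, b] takes about ceil(log2((b - a) / 0.01))
# passes; for the bisection(0, 6) call below that is ceil(log2(600)) = 10 iterations.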
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
    print(bisection(0, 6))
| 90 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
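# Minimal sketch (assumes the released `transformers` package, whose
# AutoformerConfig this file mirrors; not a use of the class defined above):
#
#     from transformers import AutoformerConfig
#     cfg = AutoformerConfig(prediction_length=24)
#     # encoder/decoder input width = input_size * len(lags_sequence) + _number_of_features
#     print(cfg.feature_size)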
| 20 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    '''simple docstring'''
    default_checkpoint = '''facebook/nllb-200-distilled-600M'''
    description = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    name = '''translator'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['''text''', '''text''', '''text''']
    outputs = ['''text''']
    def encode( self , text , src_lang , tgt_lang ) -> Tuple:
        if src_lang not in self.lang_to_code:
            raise ValueError(F'{src_lang} is not a supported language.' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'{tgt_lang} is not a supported language.' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text ,return_tensors='pt' ,src_lang=src_lang ,tgt_lang=tgt_lang )
    def forward( self , inputs ) -> int:
        return self.model.generate(**inputs )
    def decode( self , outputs ) -> List[Any]:
        return self.post_processor.decode(outputs[0].tolist() ,skip_special_tokens=True )
| 91 |
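# Illustrative usage sketch for the translation tool above (assumes the
# `transformers` agents API and its "translation" task name; the sentence is
# an arbitrary example):
#
#     from transformers import load_tool
#     translator = load_tool("translation")
#     print(translator("Bonjour", src_lang="French", tgt_lang="English"))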
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
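# Illustrative sketch of the post-processing above (model/processor names and
# the target size are examples, not taken from this file):
#
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     seg_maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])
#     # seg_maps[0] is a (512, 512) tensor of per-pixel class ids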
| 20 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
UpperCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
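# Sketch of the segment-id layout produced by create_token_type_ids_from_sequences
# above (token strings are illustrative):
#
#     [CLS] how are you [SEP]   ->  0 0 0 0 0
#     [CLS] a [SEP] b [SEP]     ->  0 0 0 1 1   (first segment 0s, second segment 1s)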
| 92 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class _PatchedModuleObj:
    def __init__( self , module , attrs=None) -> Tuple:
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__'):
                    setattr(self , key , getattr(module , key))
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj) else module
class patch_submodule:
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None) -> List[str]:
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.')[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self) -> Optional[int]:
        *submodules , target_attr = self.target.split('.')
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('.'.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs))
                    patched = getattr(self.obj , attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None) , attrs=self.attrs))
                        patched = getattr(patched , key)
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules)) , target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr) is attr_value:
                    self.original[attr] = getattr(self.obj , attr)
                    setattr(self.obj , attr , self.new)
        elif target_attr in globals()['__builtins__']:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj , target_attr , self.new)
        else:
            raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
    def __exit__( self , *exc_info) -> str:
        for attr in list(self.original):
            setattr(self.obj , attr , self.original.pop(attr))
    def start( self) -> Any:
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)
    def stop( self) -> Union[str, Any]:
        """Deactivate a patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
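# Illustrative usage sketch (the patched module and replacement below are
# hypothetical):
#
#     import some_module
#     with patch_submodule(some_module, "os.path.join", lambda *p: "/".join(p)):
#         ...  # code in some_module now sees the fake os.path.join
#     # on exit, the originals saved in self.original are restored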
| 20 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__A = logging.get_logger(__name__)
def normalize_box(box , width , height ) ->Tuple:
"""simple docstring"""
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
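# Worked example of the rescaling above: a word box of (left, top, right, bottom)
# = (50, 100, 150, 200) on a 1000x2000-pixel page maps onto the 0-1000 grid
# LayoutLM expects:
#
#     assert normalize_box([50, 100, 150, 200], 1000, 2000) == [50, 50, 150, 100]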
def apply_tesseract(image , lang , tesseract_config ) ->tuple:
    """Apply Tesseract OCR on a document image and return the recognized words plus normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='dict' , config=tesseract_config )
    words , left , top , width , height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
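# Added usage sketch (hypothetical image path; requires pytesseract plus a local
# Tesseract install):
#   words, boxes = apply_tesseract(PIL.Image.open('invoice.png'), lang='eng', tesseract_config='')
# `words` and `boxes` are aligned lists; each box is [x0, y0, x1, y1] on the
# 0-1000 scale produced by normalize_box above.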
class _lowerCAmelCase ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 2_5_5 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = "" , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase__ :Union[str, Any] = get_size_dict(__UpperCAmelCase )
lowerCAmelCase__ :int = do_resize
lowerCAmelCase__ :Union[str, Any] = size
lowerCAmelCase__ :List[str] = resample
lowerCAmelCase__ :int = do_rescale
lowerCAmelCase__ :int = rescale_value
lowerCAmelCase__ :List[Any] = do_normalize
lowerCAmelCase__ :Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ :List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCAmelCase__ :Tuple = apply_ocr
lowerCAmelCase__ :List[str] = ocr_lang
lowerCAmelCase__ :Any = tesseract_config
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
lowerCAmelCase__ :Tuple = (size['height'], size['width'])
return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :int = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ :Tuple = size if size is not None else self.size
lowerCAmelCase__ :str = get_size_dict(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = resample if resample is not None else self.resample
lowerCAmelCase__ :int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ :str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ :str = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ :str = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ :List[Any] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ :List[str] = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase__ :Union[str, Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase__ :Union[str, Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , __UpperCAmelCase , __UpperCAmelCase )
                words_batch.append(words )
                boxes_batch.append(boxes )
if do_resize:
            images = [self.resize(image=image , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_rescale:
            images = [self.rescale(image=image , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
            images = [self.normalize(image=image , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
        images = [to_channel_dimension_format(image , __UpperCAmelCase ) for image in images]
        data = BatchFeature(data={'pixel_values': images} , tensor_type=__UpperCAmelCase )
        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
return data
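# Added usage sketch for the image processor above (the obfuscated class name is kept
# as-is; checkpoint and file names are illustrative): calling it on a PIL image with
# apply_ocr enabled returns pixel_values plus OCR'd `words` and normalized `boxes`,
# which is the input format LayoutLM-style document models expect.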
| 93 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool( string : str ):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
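# Added example invocation (script and file names are hypothetical):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path control_sd15_canny.pth \
#       --original_config_file cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors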
| 20 | 0 |
'''simple docstring'''
def climb_stairs( number_of_steps : int ) -> int:
    """Count the distinct ways to climb a staircase of `number_of_steps` steps, taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), F'number_of_steps needs to be a positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    current , previous = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
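# Added worked example: for 4 steps the (current, previous) pairs evolve
# (1, 1) -> (2, 1) -> (3, 2) -> (5, 3), so climb_stairs(4) == 5, matching the
# recurrence ways(n) = ways(n - 1) + ways(n - 2) with ways(1) = 1 and ways(2) = 2.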
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent( user_agent : Union[Dict, str, None] = None ):
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"""; torch/{_torch_version}"""
    if is_flax_available():
        ua += f"""; jax/{_jax_version}"""
        ua += f"""; flax/{_flax_version}"""
    if is_onnx_available():
        ua += f"""; onnxruntime/{_onnxruntime_version}"""
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name( model_id : str , organization : Optional[str] = None , token : Optional[str] = None ):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['name']
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""
def _lowercase( __a : Union[str, Any] , __a : Dict ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__a , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
a__ =args.hub_token if hasattr(__a , 'hub_token' ) else None
a__ =get_full_repo_name(__a , token=__a )
a__ =ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__a , model_name=__a , repo_name=__a , dataset_name=args.dataset_name if hasattr(__a , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__a , 'gradient_accumulation_steps' ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(__a , 'adam_beta1' ) else None , adam_beta2=args.adam_beta2 if hasattr(__a , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__a , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__a , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__a , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__a , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__a , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
a__ =os.path.join(args.output_dir , 'README.md' )
model_card.save(__a )
def extract_commit_hash( resolved_file : Optional[str] , commit_hash : Optional[str] = None ):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r'snapshots/([^/]+)/' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
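# Added worked example: for a resolved cache file such as
#   .../diffusers/models--org--repo/snapshots/<40-hex-commit-hash>/unet/config.json
# the regex captures the snapshots folder name, which is returned when it matches
# REGEX_COMMIT_HASH; anything else yields None.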
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache( old_cache_dir : Optional[str] = None , new_cache_dir : Optional[str] = None ):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
with open(cache_version_file) as f:
try:
            cache_version = int(f.read())
except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant( weights_name : str , variant : Optional[str] = None ):
    if variant is not None:
        splits = weights_name.split('.' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits )
    return weights_name
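# Added worked example for _add_variant: the variant is spliced in before the final
# extension, e.g. _add_variant('diffusion_pytorch_model.bin', 'fp16') returns
# 'diffusion_pytorch_model.fp16.bin'.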
def _lowercase( __a : Union[str, Any] , *,
__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : int=None , ):
a__ =str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
a__ =os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
a__ =os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('0.20.0' )
):
try:
a__ =hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
a__ =hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 0 |
"""simple docstring"""
class PrefixSum:
    def __init__( self , array : list[int] ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum( self , start : int , end : int ) -> int:
        # O(1) range-sum query over array[start..end] using the precomputed prefix sums.
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum( self , target_sum : int ) -> bool:
        # True if any contiguous subarray sums to target_sum.
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
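def _demo_prefix_sum() -> None:
    # Added usage sketch (not part of the original file):
    ps = PrefixSum([1, 2, 3, 4] )  # prefix sums: [1, 3, 6, 10]
    assert ps.get_sum(1 , 2 ) == 5  # 2 + 3
    assert ps.contains_sum(7 )  # the contiguous slice [3, 4] sums to 7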
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def convert_weight_and_push( hidden_sizes : int , name : str , config : LevitConfig , save_directory : Path , push_to_hub : bool = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
a__ =timm.create_model('levit_128s' , pretrained=__a )
else:
a__ =timm.create_model('levit_128' , pretrained=__a )
if hidden_sizes == 192:
a__ =timm.create_model('levit_192' , pretrained=__a )
if hidden_sizes == 256:
a__ =timm.create_model('levit_256' , pretrained=__a )
if hidden_sizes == 384:
a__ =timm.create_model('levit_384' , pretrained=__a )
from_model.eval()
a__ =LevitForImageClassificationWithTeacher(__a ).eval()
a__ =OrderedDict()
a__ =from_model.state_dict()
a__ =list(from_model.state_dict().keys() )
a__ =list(our_model.state_dict().keys() )
print(len(__a ) , len(__a ) )
for i in range(len(__a ) ):
a__ =weights[og_keys[i]]
our_model.load_state_dict(__a )
a__ =torch.randn((2, 3, 224, 224) )
a__ =from_model(__a )
a__ =our_model(__a ).logits
assert torch.allclose(__a , __a ), "The model logits don't match the original one."
a__ =name
print(__a )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
a__ =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push( save_directory : Path , model_name : str = None , push_to_hub : bool = True ):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
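# Added example invocation (script name is hypothetical):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub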
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item : str , main_target : str ) -> tuple[str, float]:
    """Evaluate how similar the item is with the target by just counting each char in the right position."""
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover(parent_1 : str , parent_2 : str ) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0 , len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
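# Added worked example: with random_slice == 2, crossover("abcd", "wxyz") returns
# ("abyz", "wxcd") -- each child keeps one parent's prefix and the other's suffix.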
def mutate(child : str , genes : list[str] ) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0 , len(child ) - 1 )] = random.choice(genes )
    return "".join(child_list )
def select(parent_1 : tuple[str, float] , population_score : list[tuple[str, float]] , genes : list[str] ) -> list[str]:
    """Select a second parent from the scored population and generate new children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 1_0_0 ) + 1
    child_n = 1_0 if child_n >= 1_0 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0 , N_SELECTED )][0]
        child_1 , child_2 = crossover(parent_1[0] , parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1 , genes ) )
        pop.append(mutate(child_2 , genes ) )
    return pop
def basic(target : str , genes : list[str] , debug : bool = True ) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("".join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithm is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x: x[1] , reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 1_0 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , population_score , genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation , population , target = basic(target_str, genes_list)
print(
f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 96 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict( filename ):
    result = {}
    with open(filename , 'r' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = words[0]
                value = line_number
                result[key] = value
    return result
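# Added sketch of the label file format read_txt_into_dict expects: one class per
# line, keyed by the first whitespace-separated token and mapped to its line index,
# e.g. a file containing "down", "go", "left" on successive lines yields
# {"down": 0, "go": 1, "left": 2}.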
def set_recursively( __a : Dict , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : str ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ =getattr(__a , __a ).shape
elif weight_type is not None and weight_type == "param":
a__ =hf_pointer
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =shape_pointer.shape
# let's reduce dimension
a__ =value[0]
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =value
else:
a__ =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict( __a : Optional[int] , __a : int , __a : Optional[int] , __a : Optional[Any] , __a : List[Any] ):
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ ='.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a__ ='.'.join([key, hf_param_name] )
else:
a__ =key
a__ =value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer( __a : Dict , __a : int , __a : int=None , __a : List[str]=None ):
a__ =False
for key, mapped_key in MAPPING.items():
a__ ='wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
else:
a__ =None
if hf_dict is not None:
rename_dict(__a , __a , __a , __a , __a )
else:
set_recursively(__a , __a , __a , __a , __a )
    return is_used
def recursively_load_weights( __a : Union[str, Any] , __a : List[str] , __a : Dict ):
a__ =[]
a__ =fairseq_model.state_dict()
a__ =hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a__ =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
a__ =True
else:
a__ =load_wavaveca_layer(__a , __a , __a )
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( __a : List[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] ):
a__ =full_name.split('conv_layers.' )[-1]
a__ =name.split('.' )
a__ =int(items[0] )
a__ =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def convert_wavaveca_checkpoint( __a : str , __a : str , __a : Any=None , __a : str=None , __a : Any=True , __a : Union[str, Any]=False ):
if config_path is not None:
a__ =WavaVecaConfig.from_pretrained(__a )
else:
a__ =WavaVecaConfig()
if is_seq_class:
a__ =read_txt_into_dict(__a )
a__ =idalabel
a__ =WavaVecaForSequenceClassification(__a )
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
feature_extractor.save_pretrained(__a )
elif is_finetuned:
if dict_path:
a__ =Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ =target_dict.pad_index
a__ =target_dict.bos_index
a__ =target_dict.eos_index
a__ =len(target_dict.symbols )
a__ =os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
a__ =target_dict.indices
# fairseq has the <pad> and <s> switched
a__ =0
a__ =1
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
a__ =WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
a__ =True if config.feat_extract_norm == 'layer' else False
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
a__ =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
a__ =WavaVecaForCTC(__a )
else:
a__ =WavaVecaForPreTraining(__a )
if is_finetuned or is_seq_class:
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ =argparse.Namespace(task='audio_pretraining' )
a__ =fairseq.tasks.setup_task(__a )
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
a__ =model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
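# Added example invocation (script and checkpoint names are hypothetical):
#   python convert_wav2vec2_original_checkpoint.py --checkpoint_path wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned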
| 20 | 0 |
class Graph:
    """A directed graph stored as an adjacency dict, traversed with recursive depth-first search."""
    def __init__( self ) -> None:
        self.vertex = {}
    def print_graph( self ) -> None:
        print(self.vertex )
        for i in self.vertex:
            print(i , ''' -> ''' , ''' -> '''.join([str(j ) for j in self.vertex[i]] ) )
    def add_edge( self , from_vertex : int , to_vertex : int ) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs( self ) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive( self , start_vertex : int , visited : list ) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=''' ''' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i , visited )
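# Note (added): dfs() assumes the vertices are exactly the integers
# 0..len(self.vertex)-1, because `visited` is a plain list indexed by vertex id.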
if __name__ == "__main__":
__a = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 97 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase ):
    @slow
    def test_small_integration_test( self ) -> None:
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item() )
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
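# Note (added): model(...).loss is the mean cross-entropy per target token, so
# -(labels.shape[-1] * loss.item()) reconstructs the sequence log-likelihood; the
# "mtf" name presumably refers to the original Mesh-TensorFlow T5 implementation
# the expected score was taken from.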
| 20 | 0 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
lowercase__ : Any = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
"""simple docstring"""
_snake_case : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
_snake_case : Optional[bool] = field(
default=__magic_name__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
_snake_case : Optional[bool] = field(
default=__magic_name__ , metadata={'help': 'Whether to log verbose messages or not.'} , )
_snake_case : Optional[float] = field(
default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
_snake_case : Optional[float] = field(
default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
_snake_case : Optional[float] = field(
default=0.99_99_95 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def configure_logger( model_args : ModelArguments, training_args : TrainingArguments ) -> None:
    """Configure the module logger's verbosity from the parsed arguments."""
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments :
"""simple docstring"""
_snake_case : str = field(
default=__magic_name__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
_snake_case : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_snake_case : Optional[str] = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_snake_case : Optional[str] = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_snake_case : Optional[str] = field(
default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
_snake_case : bool = field(
default=__magic_name__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
_snake_case : Optional[int] = field(
default=1 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
_snake_case : Optional[int] = field(
default=__magic_name__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
_snake_case : Optional[float] = field(
default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class DataCollatorForWavaVecaPretraining :
"""simple docstring"""
_snake_case : WavaVecaForPreTraining
_snake_case : WavaVecaFeatureExtractor
_snake_case : Union[bool, str] = "longest"
_snake_case : Optional[int] = None
_snake_case : Optional[int] = None
def __call__( self : List[Any] , lowerCAmelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
_UpperCamelCase = self.feature_extractor.pad(
lowerCAmelCase__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
_UpperCamelCase = batch['''input_values'''].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
_UpperCamelCase = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
_UpperCamelCase = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
_UpperCamelCase = 1
_UpperCamelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
_UpperCamelCase = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowerCAmelCase__ , min_masks=2 , )
return batch
class WavaVecaPreTrainer ( Trainer ):
"""simple docstring"""
def __init__( self : str , *lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str]=1 , lowerCAmelCase__ : List[str]=0 , lowerCAmelCase__ : str=1.0 , **lowerCAmelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = 0
_UpperCamelCase = max_gumbel_temp
_UpperCamelCase = min_gumbel_temp
_UpperCamelCase = gumbel_temp_decay
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : nn.Module , lowerCAmelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
'''simple docstring'''
model.train()
_UpperCamelCase = self._prepare_inputs(lowerCAmelCase__ )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(lowerCAmelCase__ , lowerCAmelCase__ )
else:
_UpperCamelCase = self.compute_loss(lowerCAmelCase__ , lowerCAmelCase__ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''mask_time_indices''']).sum()
else:
raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase__ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase__ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def main() -> None:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
configure_logger(lowercase, lowercase )
# Downloading and loading a dataset from the hub.
_UpperCamelCase = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
_UpperCamelCase = DatasetDict()
_UpperCamelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""", cache_dir=model_args.cache_dir, )
_UpperCamelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""", cache_dir=model_args.cache_dir, )
else:
# make sure only "validation" and "train" keys remain"
_UpperCamelCase = DatasetDict()
_UpperCamelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split='''validation''', cache_dir=model_args.cache_dir, )
_UpperCamelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=F"""{data_args.train_split_name}""", cache_dir=model_args.cache_dir, )
# only normalized-inputs-training is supported
_UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=lowercase )
def prepare_dataset(lowercase : Optional[Any] ):
# check that all files have the correct sampling rate
_UpperCamelCase , _UpperCamelCase = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
_UpperCamelCase = datasets.map(
lowercase, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
_UpperCamelCase = vectorized_datasets.filter(
lambda lowercase : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(lowercase : int ):
return feature_extractor(batch['''speech'''], sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
_UpperCamelCase = vectorized_datasets.map(
lowercase, batched=lowercase, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets['''train'''].column_names, )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_UpperCamelCase = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
_UpperCamelCase = WavaVecaForPreTraining(lowercase )
_UpperCamelCase = DataCollatorForWavaVecaPretraining(model=lowercase, feature_extractor=lowercase )
_UpperCamelCase = WavaVecaPreTrainer(
model=lowercase, data_collator=lowercase, args=lowercase, train_dataset=vectorized_datasets['''train'''], eval_dataset=vectorized_datasets['''validation'''], tokenizer=lowercase, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, )
trainer.train()
if __name__ == "__main__":
main()
| 98 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')

        processor = BlipProcessor(tokenizer, image_processor)

        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self) -> None:
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 0 |
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + """/random""" ).json()
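# A small formatting sketch (an assumption about the response shape: zenquotes
# returns a list of objects with "q" (quote) and "a" (author) keys):
def format_quote(payload: list) -> str:
    item = payload[0]
    return f"\"{item['q']}\" - {item['a']}"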
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 99 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
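# Expected behaviour (a quick sanity check of the quadratic exchange sort above):
# exchange_sort([5, 1, 4, 2]) -> [1, 2, 4, 5]
# exchange_sort([]) -> []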
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: ordinary least squares via the normal equation."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + beta[2] * test_mtch[0])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX (seasonal ARIMA with an exogenous regressor)."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method='''nm''')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regressor with an RBF kernel."""
    regressor = SVR(kernel='''rbf''', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Derive a lower limit from the interquartile range of the training data."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
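# Worked example (numpy's default linear interpolation for percentiles):
# interquartile_range_checker([10, 20, 30, 40]) gives q1 = 17.5, q3 = 32.5,
# iqr = 15.0, so the returned lower limit is 17.5 - 1.5 = 16.0.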
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["""total_user""", """total_even""", """days"""]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = """""" if data_safety_checker(res_vote, tst_user[0]) else """not """
    print(f"""Today's data is {not_str}safe.""")
| 100 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__( self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True, ) -> None:
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)
    def test_config(self) -> None:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self) -> None:
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
    def test_forward_signature(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 20 | 0 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = PandasConfig
    def _info(self):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self, dl_manager):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
return splits
    def _cast_table(self, pa_table):
        """simple docstring"""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
return pa_table
    def _generate_tables(self, files):
        """simple docstring"""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
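# A minimal usage sketch (hypothetical file name; the builder above reads
# pickled pandas DataFrames via `pd.read_pickle`):
#
# from datasets import load_dataset
# ds = load_dataset("pandas", data_files="my_frame.pkl", split="train")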
| 101 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 0 |
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
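# Known values for this recurrence (Project Euler 191): solution(4) returns 43
# and solution(30) returns 1918080160.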
if __name__ == "__main__":
print(solution())
| 102 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = 'big_bird'

    def __init__( self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs, ) -> None:
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
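# A minimal usage sketch (assumes only the defaults defined above):
# config = BigBirdConfig(attention_type="original_full")
# config.block_size  # -> 64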
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False
    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors
return successors
    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parents to parents until start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
path.reverse()
return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False
    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print('''Unidirectional BFS computation time : ''', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print('''Bidirectional BFS computation time : ''', bd_bfs_time)
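# Performance note: list.pop(0) is O(n). For larger grids, collections.deque
# gives O(1) pops from the left; a drop-in sketch (hypothetical refactor):
#   from collections import deque
#   self.node_queue = deque([self.start])
#   current_node = self.node_queue.popleft()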
| 103 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
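# Example invocation (hypothetical script and checkpoint names):
# python convert_swiftformer_original_to_hf.py \
#     --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ \
#     --original_ckpt ./swiftformer_xs.pth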
| 20 | 0 |
"""simple docstring"""
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of `number` ends in `number` itself."""
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
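# Examples: 5 * 5 = 25 and 76 * 76 = 5776 both end in the original number, so
# is_automorphic_number(5) and is_automorphic_number(76) are True, while
# is_automorphic_number(7) is False (49 does not end in 7).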
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the component index that a given node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates a new component throughout the component map."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Attaches the smaller of two components to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Boruvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 105 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 20 | 0 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        model = CLIPVisionModelWithProjection(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log', prediction_type='sample', num_train_timesteps=1_000, clip_sample=True, clip_sample_range=10.0, )

        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_prior(self):
        device = 'cpu'

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 106 |
from __future__ import annotations
END = '#'


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
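# Expected result (dict insertion order is preserved on Python 3.7+; the END
# marker contributes the trailing space on each completed word):
# autocomplete_using_trie('de') -> ('depart ', 'detergent ', 'deer ', 'deal ')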
def main() -> None:
print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 20 | 0 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
_UpperCAmelCase : List[str] = True
from torch.cuda.amp import autocast
_UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."}, )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."} )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    validation_split_name: Optional[str] = field(
        default="validation", metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        }, )
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWavaVecaPretraining:
"""simple docstring"""
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1])

        batch_size = batch['input_values'].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1)).to(
                torch.long)

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['input_values'].device)

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=batch['input_values'].device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )

        return batch
class WavaVecaPreTrainer(Trainer):
"""simple docstring"""
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['mask_time_indices']).sum()
            else:
                raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']')

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp ) )
return loss.detach()
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]', cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]', cache_dir=model_args.cache_dir, )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split='validation', cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f'{data_args.train_split_name}', cache_dir=model_args.cache_dir, )
# only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets['train'].column_names)

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data['speech']) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch['speech'], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets['train'].column_names, )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
_A = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
' ``config.feat_extract_norm=\'layer\'' )
_A = WavaVecaForPreTraining(__snake_case )
_A = DataCollatorForWavaVecaPretraining(model=__snake_case , feature_extractor=__snake_case )
_A = WavaVecaPreTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=__snake_case , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
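# A hedged usage sketch, not from this file: dataset name, split sizes and
# flags below are illustrative only. The script would typically be launched as
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path=patrickvonplaten/wav2vec2-base \
#       --dataset_name=librispeech_asr \
#       --dataset_config_name=clean \
#       --output_dir=./wav2vec2-pretrained \
#       --max_duration_in_seconds=20.0 \
#       --per_device_train_batch_size=8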
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main():
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
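# Quick round-trip sanity check (values are illustrative, hand-verified
# against the key-cycling logic above: the key only advances on letters):
#
#   >>> encrypt_message('LEMON', 'Attack at dawn')
#   'Lxfopv ef rnhr'
#   >>> decrypt_message('LEMON', 'Lxfopv ef rnhr')
#   'Attack at dawn'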
def check_bouncy(n: int) -> bool:
    """Return True if the digits of ``n`` are neither sorted ascending nor descending."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers first reaches ``percent``."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
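# Context for readers unfamiliar with Project Euler 112: a "bouncy" number is
# neither increasing (e.g. 134468) nor decreasing (e.g. 66420) in its digits.
# The problem statement's worked examples agree with this implementation:
# solution(50) == 538 and solution(90) == 21780.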
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
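# Minimal standalone sketch of the behaviour these tests pin down (same
# `evaluate` import as above; values illustrative): the `state` dict is
# threaded through and mutated in place, and the value of the last
# assignment/expression is returned.
#
#   state = {}
#   evaluate("x = 1\ny = x + 1", {}, state=state)  # returns 2
#   state                                          # {"x": 1, "y": 2}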
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always
        # return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
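# These fast tests are normally collected with pytest; the path below is
# illustrative, not taken from this file:
#
#   pytest tests/pipelines/kandinsky_v22/test_kandinsky_prior.py -k "prior"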
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
def lowerCamelCase ( _snake_case ,_snake_case=100 ,_snake_case=" " ):
UpperCAmelCase__ : Optional[int] = text.split(_snake_case )
return [character.join(text[i : i + n] ).strip() for i in range(0 ,len(_snake_case ) ,_snake_case )]
def lowerCamelCase ( _snake_case ):
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = [], []
for title, text in zip(documents['title'] ,documents['text'] ):
if text is not None:
for passage in split_text(_snake_case ):
titles.append(title if title is not None else '' )
texts.append(_snake_case )
return {"title": titles, "text": texts}
def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase__ : List[Any] = ctx_tokenizer(
documents['title'] ,documents['text'] ,truncation=_snake_case ,padding='longest' ,return_tensors='pt' )['input_ids']
UpperCAmelCase__ : str = ctx_encoder(input_ids.to(device=_snake_case ) ,return_dict=_snake_case ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ,):
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
UpperCAmelCase__ : Optional[int] = load_dataset(
'csv' ,data_files=[rag_example_args.csv_path] ,split='train' ,delimiter='\t' ,column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
UpperCAmelCase__ : Tuple = dataset.map(_snake_case ,batched=_snake_case ,num_proc=processing_args.num_proc )
# And compute the embeddings
UpperCAmelCase__ : Optional[int] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_snake_case )
UpperCAmelCase__ : int = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
UpperCAmelCase__ : int = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
UpperCAmelCase__ : str = dataset.map(
partial(_snake_case ,ctx_encoder=_snake_case ,ctx_tokenizer=_snake_case ) ,batched=_snake_case ,batch_size=processing_args.batch_size ,features=_snake_case ,)
# And finally save your dataset
UpperCAmelCase__ : Any = os.path.join(rag_example_args.output_dir ,'my_knowledge_dataset' )
dataset.save_to_disk(_snake_case )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
UpperCAmelCase__ : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d ,index_hnsw_args.m ,faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' ,custom_index=_snake_case )
# And save the index
UpperCAmelCase__ : str = os.path.join(rag_example_args.output_dir ,'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(_snake_case )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class a :
UpperCamelCase : str = field(
default=str(Path(lowercase ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , )
UpperCamelCase : Optional[str] = field(
default=lowercase , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , )
UpperCamelCase : str = field(
default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , )
UpperCamelCase : str = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} , )
UpperCamelCase : Optional[str] = field(
default=str(Path(lowercase ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , )
@dataclass
class a :
UpperCamelCase : Optional[int] = field(
default=lowercase , metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} , )
UpperCamelCase : int = field(
default=1_6 , metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} , )
@dataclass
class a :
UpperCamelCase : int = field(
default=7_6_8 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , )
UpperCamelCase : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
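# Illustrative invocation (the paths are hypothetical): the csv must be
# tab-separated with "title" and "text" columns, as asserted in main() above.
#
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/my_knowledge_dataset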
from manim import *


class Stage3(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
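# The scene would be rendered with the manim CLI, e.g. (file name is
# illustrative; -p previews the video, -qh selects high quality):
#
#   manim -pqh stage_3.py Stage3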
def z_function(input_str: str) -> list[int]:
    """For each position, compute the length of the longest prefix of
    ``input_str`` that also starts at that position."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
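    # Hand-checked worked example: the Z-array of "abracadabra" and the
    # number of occurrences of "abra" in it (positions 0 and 7).
    print(z_function("abracadabra"))  # [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    print(find_pattern("abra", "abracadabra"))  # 2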
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)

        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
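# Typical library usage mirrored by these tests (argument values illustrative):
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux"]
#   )
#   example.foo, example.bar, example.flag  # 1, 0.5, False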
"""simple docstring"""
import math
def UpperCamelCase ( _A ) -> List[Any]:
lowercase : Any = 0
lowercase : Any = 0
while num > 0:
lowercase : Tuple = num % 8
lowercase : List[str] = octal + (remainder * math.floor(math.pow(10 , __a ) ))
counter += 1
lowercase : str = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return F"""0o{int(__a )}"""
def UpperCamelCase ( ) -> Tuple:
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
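# Cross-check sketch: Python's built-in oct() should always agree with the
# digit-by-digit construction above, e.g. oct(216) == "0o330" == decimal_to_octal(216).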
from typing import List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
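# Minimal instantiation sketch (values illustrative):
#
#   config = AutoformerConfig(prediction_length=24, context_length=48)
#   config.feature_size  # input_size * len(lags_sequence) + _number_of_features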
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(key, value, full_name, weight_type, hf_pointer):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(mapped_key, value, name, weight_type, hf_model)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
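# Illustrative conversion command (checkpoint path hypothetical): converting a
# fairseq pretraining checkpoint into a transformers Wav2Vec2ForPreTraining model.
#
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned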
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
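    # The method below post-processes model outputs into per-image semantic
    # segmentation maps, optionally resizing the logits to each `target_sizes` entry.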
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 20 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
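# The tester above supplies both standard and decoder-style (cross-attention)
# inputs for the Flax RoBERTa test suite below.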
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 65 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class _PatchedModuleObj:
    """Patched module that keeps references to the original module and its attributes."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__'):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other attributes intact."""

    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.')[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split('.')

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('.'.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()['__builtins__']:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
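# Minimal usage sketch (assumption: `some_module` has a global that resolves to
# `os.path.join`; names follow the class above):
#
#   patcher = patch_submodule(some_module, "os.path.join", fake_join)
#   with patcher:
#       ...  # inside the block, some_module sees fake_join
#   patcher.start(); patcher.stop()  # imperative alternative to the context manager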
| 20 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Check if a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
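# Worked example: solution(10) sums the primes below 10 (2 + 3 + 5 + 7 = 17);
# the default limit of 2_000_000 corresponds to Project Euler problem 10.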
| 672 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string: str):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"""could not parse string as bool {string}""")
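    # parse_bool lets boolean overrides be passed explicitly as `--flag True/False`,
    # so an unset flag stays distinguishable from an explicit False.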
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 20 | 0 |
def method_2(boundary, steps):
    """Approximate the definite integral of f over [boundary[0], boundary[1]] with the trapezoidal rule."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points between a and b with spacing h."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
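# Sanity check: with f(x) = x**2 on [0, 1] the exact integral is 1/3, so the
# printed trapezoidal estimate should approach 0.3333... as `steps` grows.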
| 203 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuida().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
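# Example result (fields depend on installed backends and the env flags above):
# "diffusers/<version>; python/3.10.12; session_id/<hex>; torch/2.0.1"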
def get_full_repo_name(model_id: str, token: Optional[str] = None, organization: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.')
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en', license='apache-2.0', library_name='diffusers', tags=[], datasets=args.dataset_name, metrics=[],
        ), template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
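# Resolved cache paths look like .../snapshots/<commit_hash>/<filename>, which is
# why the commit hash can be recovered with the regex above.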
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None):
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
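# Example: _add_variant('diffusion_pytorch_model.bin', 'fp16') returns
# 'diffusion_pytorch_model.fp16.bin'.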
def _get_model_file(
    pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies,
    resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 0 |
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list is an arithmetic series, else False."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")

    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
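# Examples: is_arithmetic_series([2, 4, 6]) -> True; arithmetic_mean([2, 4, 6]) -> 4.0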
if __name__ == "__main__":
import doctest
doctest.testmod() | 433 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Weights are copied positionally: the i-th timm parameter maps to the
        # i-th HF parameter, relying on matching state-dict enumeration order.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
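    # Hidden sizes, attention heads and key dims scale with the variant; only
    # levit-128S uses shallower depths (2, 3, 4) while the others use (4, 4, 4).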
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 20 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps a language-modeling dataset: token ids plus their lengths."""

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])
def __len__( self ):
'''simple docstring'''
return len(self.lengths )
    def check(self):
        """Some sanity checks."""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size,
        and each chunk is re-wrapped with the cls/bos and sep/eos special tokens."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
if not self.params.is_master:
return
logger.info(f"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 336 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
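# Keys listed above live at the top level of the HF model (e.g. `lm_head`), so
# they are not prefixed with `wav2vec2.` during the renaming below.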
def read_txt_into_dict(filename):
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
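# The mapping above (line number -> first token on the line) serves as the
# id2label table when converting a sequence-classification checkpoint.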
def set_recursively(hf_pointer, key, value, full_name, weight_type):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ =getattr(__a , __a ).shape
elif weight_type is not None and weight_type == "param":
a__ =hf_pointer
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =shape_pointer.shape
# let's reduce dimension
a__ =value[0]
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =value
else:
a__ =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict(key, value, full_name, weight_type, hf_dict):
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ ='.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a__ ='.'.join([key, hf_param_name] )
else:
a__ =key
a__ =value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
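# Suffix-to-HF mapping that appears to cover the adapter-layer projections and
# norm: when a fairseq parameter name ends with one of these keys, the matching
# HF parameter name on the right is used instead.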
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
a__ =False
for key, mapped_key in MAPPING.items():
a__ ='wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
else:
a__ =None
if hf_dict is not None:
rename_dict(__a , __a , __a , __a , __a )
else:
set_recursively(__a , __a , __a , __a , __a )
return is_used
return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
a__ =[]
a__ =fairseq_model.state_dict()
a__ =hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a__ =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
a__ =True
else:
a__ =load_wavaveca_layer(__a , __a , __a )
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
a__ =full_name.split('conv_layers.' )[-1]
a__ =name.split('.' )
a__ =int(items[0] )
a__ =int(items[1] )
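    # type_id 0 refers to the conv weight/bias itself; type_id 2 refers to the
    # layer norm (with group norm, only conv layer 0 carries its own norm).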
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
if config_path is not None:
a__ =WavaVecaConfig.from_pretrained(__a )
else:
a__ =WavaVecaConfig()
if is_seq_class:
a__ =read_txt_into_dict(__a )
a__ =idalabel
a__ =WavaVecaForSequenceClassification(__a )
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
feature_extractor.save_pretrained(__a )
elif is_finetuned:
if dict_path:
a__ =Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ =target_dict.pad_index
a__ =target_dict.bos_index
a__ =target_dict.eos_index
a__ =len(target_dict.symbols )
a__ =os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
a__ =target_dict.indices
# fairseq has the <pad> and <s> switched
a__ =0
a__ =1
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
a__ =WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
a__ =True if config.feat_extract_norm == 'layer' else False
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
a__ =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
a__ =WavaVecaForCTC(__a )
else:
a__ =WavaVecaForPreTraining(__a )
if is_finetuned or is_seq_class:
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ =argparse.Namespace(task='audio_pretraining' )
a__ =fairseq.tasks.setup_task(__a )
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
a__ =model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 20 | 0 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix * x = vector by Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the lowest-degree polynomial through (1, y_1), ..., (n, y_n)."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials (OPs)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
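# Worked example from the problem statement: for u(n) = n**3 the first incorrect
# terms (FITs) of the optimum polynomials are 1, 15 and 58, summing to 74.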
| 372 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
a__ =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=lowercase_).to(lowercase_)
a__ =AutoTokenizer.from_pretrained('google/mt5-small')
a__ =tokenizer('Hello there' , return_tensors='pt').input_ids
a__ =tokenizer('Hi I am' , return_tensors='pt').input_ids
a__ =model(input_ids.to(lowercase_) , labels=labels.to(lowercase_)).loss
a__ =-(labels.shape[-1] * loss.item())
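        # `loss` is the mean NLL per label token; scaling by the label length and
        # negating recovers the total log-likelihood of the target sequence.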
a__ =-84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 20 | 0 |
'''simple docstring'''
def solution(limit=100_0000):
    """Sum Euler's totient phi(d) over 2 <= d <= limit with a sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            # i is prime: apply the totient factor (1 - 1/i) to all multiples of i
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
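# Example: solution(8) == 21, the number of reduced proper fractions n/d with
# d <= 8 (Project Euler problem 72).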
| 533 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
        processor = BlipProcessor(image_processor, tokenizer)
processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
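    # The helper above fabricates random channel-first uint8 arrays and converts
    # them to channel-last PIL images, the input format the processor expects.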
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 0 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration(self):
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
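
A hedged usage sketch of the tokenizer tested above, reusing the public checkpoint from the integration test:

from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
ids = tokenizer("this is a test").input_ids
print(tokenizer.decode(ids, skip_special_tokens=True))  # character-level round trip: "this is a test"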
def exchange_sort(numbers: list[int]) -> list[int]:
    # Repeatedly exchange out-of-order pairs until the list is sorted (O(n^2)).
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
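
A quick sanity check of exchange_sort (it sorts in place and also returns the list):

assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-1, 7, 0, 7, 2]) == [-1, 0, 2, 7, 7]
assert exchange_sort([]) == []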
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
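
A worked check of the 2x2 branch above: inverse([[4, 7], [2, 6]]) = (1/10) * [[6, -7], [-2, 4]], and a diagonal matrix simply inverts its diagonal entries.

print(inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]]))  # [[0.6, -0.7], [-0.2, 0.4]]
print(inverse_of_matrix([[2.0, 0.0], [0.0, 2.0]]))  # [[0.5, 0.0], [0.0, 0.5]]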
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_pretrained(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
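
A hedged sketch of the timm/transformers equivalence the test above exercises, using the same AutoBackbone calls (this downloads the "resnet18" timm weights at run time):

import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
outputs = backbone(torch.randn(1, 3, 224, 224))
print(len(outputs.feature_maps), backbone.channels)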
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch_images = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch_images, threshold=0.0)

        self.assertEqual(len(batch_images), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
    def test_small_model_tf(self):
pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
        outputs = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
        outputs = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
        outputs = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.9988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png")
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , )
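
The shortest path to the same results outside the test harness is the pipeline API used above:

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for detection in detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
    print(detection["label"], detection["score"], detection["box"])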
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
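
A hedged usage sketch of the video processor above; the class name follows the (assumed) Vivit naming used in this file, and the defaults resize to shortest_edge=256 then center-crop to 224x224:

import numpy as np

processor = VivitImageProcessor()
video = [np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8) for _ in range(8)]  # 8 RGB frames
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)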
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
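
A minimal sketch instantiating the config above; switching attention_type to "original_full" falls back to dense attention, while the default "block_sparse" uses block_size and num_random_blocks:

config = BigBirdConfig(attention_type="original_full")
print(config.model_type, config.max_position_embeddings)  # big_bird 4096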
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
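
A small toy run of the SVC class above on a linearly separable set (labels must be +/-1; fit solves the Wolfe dual with scipy's minimize):

import numpy as np

xs = [np.asarray([2.0, 2.0]), np.asarray([3.0, 3.0]), np.asarray([-2.0, -2.0]), np.asarray([-3.0, -3.0])]
ys = np.asarray([1, 1, -1, -1])

svc = SVC(kernel="linear")
svc.fit(xs, ys)
print(svc.predict(np.asarray([4.0, 4.0])))    # expected: 1
print(svc.predict(np.asarray([-4.0, -4.0])))  # expected: -1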
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
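
Example invocation of the conversion script above (the script filename and the checkpoint path are placeholders you must supply yourself):

python convert_swiftformer_original_to_hf.py \
    --swiftformer_name swiftformer_xs \
    --pytorch_dump_folder_path ./converted_outputs/ \
    --original_ckpt /path/to/swiftformer_xs.pth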
def is_palindrome(num: int) -> bool:
    # Reverse the digits and compare against the original number.
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
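
A quick spot check of the digit-reversal palindrome test above:

print(is_palindrome(121))   # True
print(is_palindrome(123))   # False
print(is_palindrome(-121))  # False: the leading sign breaks the symmetry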
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root representative of the component containing u_node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Flattens every node's stored component to its root representative."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges the smaller of the two components into the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Boruvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
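# Illustrative run (not from the original file): five weighted edges on four nodes.
# Boruvka repeatedly adds the cheapest edge leaving each component, selecting
# (2-3, w=4), (0-3, w=5) and (0-1, w=10) for a total MST weight of 19.
#
#     g = Graph(4)
#     for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
#         g.add_edge(u, v, w)
#     g.boruvka()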
| 20 | 0 |
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime using trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self) -> None:
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
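# Why the 6k +/- 1 step works (illustrative note, not part of the original file):
# every integer is 6k + r with r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 4} is divisible
# by 2 and r = 3 by 3, so after the explicit 2/3 checks only candidates of the form
# 6k - 1 and 6k + 1 (i.e. i and i + 2 for i = 5, 11, 17, ...) need trial division.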
| 203 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
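# Minimal usage sketch via the `datasets` entry point (mirrors the docstring above;
# assumes the `rouge_score` package is installed):
#
#     import datasets
#     rouge = datasets.load_metric("rouge")
#     results = rouge.compute(predictions=["hello there"], references=["hello there"])
#     print(results["rouge1"].mid.fmeasure)  # 1.0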
| 20 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 433 |
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
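# For the word list above, autocomplete_using_trie("de") yields every stored word
# starting with "de", each followed by the end-of-word marker space, in insertion
# order: ('depart ', 'detergent ', 'deer ', 'deal ').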
| 20 | 0 |
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Runs breadth first search from the source vertex, filling self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Returns the shortest path from the source vertex as `v1->v2->...->vn`,
        or raises ValueError if no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
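# Note: list.pop(0) is O(n), so the search above is O(V^2 + E) in the worst case.
# A minimal sketch of the same loop with collections.deque for O(1) pops:
#
#     from collections import deque
#     queue = deque([self.source_vertex])
#     while queue:
#         vertex = queue.popleft()
#         ...  # visit neighbours exactly as above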
| 336 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
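# Worked example: encrypting "HELLO" with key "KEY" shifts each letter by the
# corresponding key letter (K=10, E=4, Y=24, repeating), giving "RIJVS";
# decrypting with the same key recovers the plaintext.
#
#     assert encrypt_message("KEY", "HELLO") == "RIJVS"
#     assert decrypt_message("KEY", "RIJVS") == "HELLO"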
| 20 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedfileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strips the remote filesystem prefix (e.g. `s3://`) from `dataset_path`."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Validates if the filesystem has a remote protocol."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clears the references to fsspec's event loop and thread, e.g. after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
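# Example: extract_path_from_uri("s3://my-bucket/data/train.parquet") returns
# "my-bucket/data/train.parquet", while a plain local path is returned unchanged.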
| 372 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
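# Illustrative use of one utility re-exported above (a minimal sketch based on the
# accelerate docs): `find_executable_batch_size` retries the decorated function with
# a smaller batch size whenever it hits a CUDA out-of-memory error.
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders and run the training loop with `batch_size`
#
#     train()  # the decorator supplies `batch_size` on each retry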
| 20 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : List[str] = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 6, 8],
            depths=[2, 3, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384],
            num_attention_heads=[4, 8, 12],
            depths=[4, 4, 4],
            key_dim=[16, 16, 16],
            drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384],
            num_attention_heads=[3, 5, 6],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512],
            num_attention_heads=[4, 6, 8],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768],
            num_attention_heads=[6, 9, 12],
            depths=[4, 4, 4],
            key_dim=[32, 32, 32],
            drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
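# Example invocation (the script file name is an illustrative assumption):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub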
| 533 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 20 | 0 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets of tuples (story, 1st continuation, 2nd continuation, label) into
    Transformer inputs of shape (n_batch, 2, input_len), one row per continuation:
        [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)

    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
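# Example invocation (the script file name and dataset paths are illustrative
# assumptions; the ROCStories CSV files must be downloaded separately):
#   python run_openai_gpt.py --do_train --do_eval \
#       --train_dataset cloze_test_val__spring2016.csv \
#       --eval_dataset cloze_test_test__spring2016.csv \
#       --output_dir ./rocstories_model/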
| 430 |
from manim import *
class Stage(Scene):
    # NOTE: the arrange/next_to direction constants (UP/RIGHT/DOWN) below are
    # assumptions; the obfuscated source did not preserve them.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_a = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
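# To render this scene locally (file name and scene class name are assumptions
# matching the code above):
#   manim -pql stage.py Stage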
| 20 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    r"""Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 585 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # yaml is a superset of json, so parse_yaml_file handles the .json file as well
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def UpperCamelCase ( _A , _A , _A , _A , ) -> Union[str, Any]:
lowercase , lowercase : List[Any] = coefficient_matrix.shape
lowercase , lowercase : Optional[int] = constant_matrix.shape
if rowsa != colsa:
lowercase : Tuple = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(__a )
if colsa != 1:
lowercase : Optional[Any] = F"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(__a )
if rowsa != rowsa:
lowercase : Dict = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(__a )
if len(__a ) != rowsa:
lowercase : Union[str, Any] = (
"""Number of initial values must be equal to number of rows in coefficient """
F"""matrix but received {len(__a )} and {rowsa}"""
)
raise ValueError(__a )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
lowercase : Optional[Any] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
lowercase , lowercase : Union[str, Any] = table.shape
strictly_diagonally_dominant(__a )
# Iterates the whole matrix for given number of times
for _ in range(__a ):
lowercase : int = []
for row in range(__a ):
lowercase : Optional[Any] = 0
for col in range(__a ):
if col == row:
lowercase : Any = table[row][col]
elif col == cols - 1:
lowercase : Dict = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowercase : Tuple = (temp + val) / denom
new_val.append(__a )
lowercase : Any = new_val
    return [float(i ) for i in new_val]
def UpperCamelCase ( _A ) -> Any:
lowercase , lowercase : int = table.shape
lowercase : Tuple = True
for i in range(0 , __a ):
lowercase : str = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
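# Hedged usage sketch (self-contained, since the identifiers above are mangled):
# one Jacobi update is x_i <- (b_i - sum_{j != i} A_ij * x_j) / A_ii, iterated on a
# strictly diagonally dominant system. A, b, and the iteration count are illustrative
# values only; the sketch reuses this file's `import numpy as np`.
def _jacobi_sketch() -> list:
    A = np.array([[4.0, 1.0], [2.0, 5.0]])  # strictly diagonally dominant
    b = np.array([1.0, 2.0])
    x = np.zeros(2)
    for _ in range(25):
        # subtract the diagonal contribution from A @ x to leave the off-diagonal sum
        x = (b - (A @ x - np.diag(A) * x)) / np.diag(A)
    return [float(v) for v in x]  # ~ np.linalg.solve(A, b) -> [1/6, 1/3]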
| 264 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class lowercase_ (lowercase__ ):
snake_case ='autoformer'
snake_case ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_ = "student_t" , lowercase_ = "nll" , lowercase_ = 1 , lowercase_ = [1, 2, 3, 4, 5, 6, 7] , lowercase_ = True , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 64 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 2 , lowercase_ = 32 , lowercase_ = 32 , lowercase_ = "gelu" , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 0.1 , lowercase_ = 100 , lowercase_ = 0.02 , lowercase_ = True , lowercase_=True , lowercase_ = 10 , lowercase_ = 25 , lowercase_ = 3 , **lowercase_ , ) -> Union[str, Any]:
# time series specific configuration
a__ =prediction_length
a__ =context_length if context_length is not None else prediction_length
a__ =distribution_output
a__ =loss
a__ =input_size
a__ =num_time_features
a__ =lags_sequence
a__ =scaling
a__ =num_dynamic_real_features
a__ =num_static_real_features
a__ =num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`')
a__ =cardinality
else:
a__ =[0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase_) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
a__ =embedding_dimension
else:
a__ =[min(50 , (cat + 1) // 2) for cat in self.cardinality]
a__ =num_parallel_samples
# Transformer architecture configuration
a__ =input_size * len(self.lags_sequence) + self._number_of_features
a__ =d_model
a__ =encoder_attention_heads
a__ =decoder_attention_heads
a__ =encoder_ffn_dim
a__ =decoder_ffn_dim
a__ =encoder_layers
a__ =decoder_layers
a__ =dropout
a__ =attention_dropout
a__ =activation_dropout
a__ =encoder_layerdrop
a__ =decoder_layerdrop
a__ =activation_function
a__ =init_std
a__ =use_cache
# Autoformer
a__ =label_length
a__ =moving_average
a__ =autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCamelCase ( self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 20 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="resnet50" , _lowerCAmelCase=3 , _lowerCAmelCase=32 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , ) -> Union[str, Any]:
'''simple docstring'''
lowercase = parent
lowercase = out_indices if out_indices is not None else [4]
lowercase = stage_names
lowercase = out_features
lowercase = backbone
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = use_pretrained_backbone
lowercase = is_training
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = self.get_config()
return config, pixel_values
def _a ( self ) -> Tuple:
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = TimmBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase = model(lowercase_ )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase = config_and_inputs
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __UpperCamelCase (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
__A = (TimmBackbone,) if is_torch_available() else ()
__A = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = TimmBackboneModelTester(self )
lowercase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def _a ( self ) -> Dict:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = """resnet18"""
lowercase = """microsoft/resnet-18"""
lowercase = AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ )
lowercase = AutoBackbone.from_pretrained(lowercase_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowercase = AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3] )
lowercase = AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn\'t support feed forward chunking""" )
def _a ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn\'t have num_hidden_layers attribute""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def _a ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def _a ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn\'t have hidden size info in its configuration.""" )
def _a ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip("""TimmBackbone doesn\'t support output_attentions.""" )
def _a ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Any:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(lowercase_ )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
lowercase = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowercase = self.all_model_classes[0]
lowercase = model_class(lowercase_ )
model.to(lowercase_ )
lowercase = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase = model(**lowercase_ )
lowercase = outputs[0][-1]
# Encoder-/Decoder-only models
lowercase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowercase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase = model(**lowercase_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowercase = copy.deepcopy(lowercase_ )
lowercase = None
lowercase = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase = model(**lowercase_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowercase = copy.deepcopy(lowercase_ )
lowercase = False
lowercase = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase = model(**lowercase_ )
| 588 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (lowercase__ ):
snake_case =['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None) -> str:
a__ =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_) != len(lowercase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(lowercase_):
a__ =target_sizes.numpy()
a__ =[]
for idx in range(len(lowercase_)):
a__ =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_)
a__ =resized_logits[0].argmax(dim=0)
semantic_segmentation.append(lowercase_)
else:
a__ =logits.argmax(dim=1)
a__ =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
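# Hedged sketch of the shortest-edge resize rule used by `resize` above: scale the
# image so its shorter side equals `shortest_edge`, preserving aspect ratio.
# (`get_resize_output_image_size` may differ in rounding details; this is illustrative.)
def _shortest_edge_size(height: int, width: int, shortest_edge: int = 256) -> tuple:
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)
# _shortest_edge_size(480, 640) -> (256, 341)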
| 20 | 0 |
"""simple docstring"""
import requests
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Dict = {"""Content-Type""": """application/json"""}
UpperCAmelCase__ : List[Any] = requests.post(__a , json={"""text""": message_body} , headers=__a )
if response.status_code != 200:
UpperCAmelCase__ : Tuple = (
"""Request to slack returned an error """
F"{response.status_code}, the response is:\n{response.text}"
)
raise ValueError(__a )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 65 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None) -> Tuple:
a__ =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , lowercase_ , getattr(lowercase_ , lowercase_))
a__ =module._original_module if isinstance(lowercase_ , _PatchedModuleObj) else module
class lowercase_ :
snake_case =[]
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> List[str]:
a__ =obj
a__ =target
a__ =new
a__ =target.split('.')[0]
a__ ={}
a__ =attrs or []
def __enter__( self) -> Optional[int]:
*a__ , a__ =self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowercase_)):
try:
a__ =import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a__ =getattr(self.obj , lowercase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowercase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a__ =obj_attr
# patch at top level
setattr(self.obj , lowercase_ , _PatchedModuleObj(lowercase_ , attrs=self.attrs))
a__ =getattr(self.obj , lowercase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowercase_ , lowercase_ , _PatchedModuleObj(getattr(lowercase_ , lowercase_ , lowercase_) , attrs=self.attrs))
a__ =getattr(lowercase_ , lowercase_)
# finally set the target attribute
setattr(lowercase_ , lowercase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a__ =getattr(import_module('.'.join(lowercase_)) , lowercase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowercase_) is attr_value:
a__ =getattr(self.obj , lowercase_)
setattr(self.obj , lowercase_ , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a__ =globals()['__builtins__'][target_attr]
setattr(self.obj , lowercase_ , self.new)
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self , *lowercase_) -> str:
for attr in list(self.original):
setattr(self.obj , lowercase_ , self.original.pop(lowercase_))
def __UpperCamelCase ( self) -> Any:
self.__enter__()
self._active_patches.append(self)
def __UpperCamelCase ( self) -> Union[str, Any]:
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
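# Hedged usage sketch. The class identifier above is mangled, so `patch_submodule`
# below is a hypothetical alias for it; `some_module` stands for any module whose
# globals reference os / os.path.join. The intended pattern is:
#
#   with patch_submodule(some_module, "os.path.join", lambda *p: "/patched"):
#       assert some_module.os.path.join("a", "b") == "/patched"
#
# Outside the `with` block the original attribute is restored via __exit__.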
| 20 | 0 |
'''simple docstring'''
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
_snake_case = str(bin(__a ) )
binary_number += "0" * shift_amount
return binary_number
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError("both inputs must be positive integers" )
_snake_case = str(bin(__a ) )[2:]
if shift_amount >= len(__a ):
return "0b0"
_snake_case = binary_number[: len(__a ) - shift_amount]
return "0b" + shifted_binary_number
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if number >= 0: # Get binary representation of positive number
_snake_case = "0" + str(bin(__a ) ).strip("-" )[2:]
else: # Get binary (2's complement) representation of negative number
_snake_case = len(bin(__a )[3:] ) # Find 2's complement of number
_snake_case = bin(abs(__a ) - (1 << binary_number_length) )[3:]
_snake_case = (
"1" + "0" * (binary_number_length - len(__a )) + binary_number
)
if shift_amount >= len(__a ):
return "0b" + binary_number[0] * len(__a )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__a ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
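# Hedged usage sketch: the underlying bit operations the three helpers above model,
# shown directly since the function names in this dump are mangled.
print(bin(0b1101 << 2))  # logical left shift   -> 0b110100
print(bin(0b1101 >> 2))  # logical right shift  -> 0b11
print(-8 >> 2)           # arithmetic right shift keeps the sign bit -> -2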
| 672 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase: int = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowercase( __a : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_lowerCAmelCase: str = parser.parse_args()
_lowerCAmelCase: Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
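# Hedged invocation sketch (the script filename and file paths below are hypothetical):
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path control_sd15_canny.pth \
#       --original_config_file cldm_v15.yaml \
#       --dump_path ./controlnet-canny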
| 20 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"could not parse string as bool {string}" )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
__lowercase = parser.parse_args()
__lowercase = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 203 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase: Tuple = get_logger(__name__)
_lowerCAmelCase: List[str] = Path(__file__).parent / 'model_card_template.md'
_lowerCAmelCase: Any = uuida().hex
_lowerCAmelCase: List[Any] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: int = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase: Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _lowercase( __a : Union[Dict, str, None] = None ):
a__ =f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"""; torch/{_torch_version}"""
if is_flax_available():
ua += f"""; jax/{_jax_version}"""
ua += f"""; flax/{_flax_version}"""
if is_onnx_available():
ua += f"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a , __a ):
ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(__a , __a ):
ua += "; " + user_agent
return ua
def _lowercase( __a : str , __a : Optional[str] = None , __a : Optional[str] = None ):
if token is None:
a__ =HfFolder.get_token()
if organization is None:
a__ =whoami(__a )['name']
return f"""{username}/{model_id}"""
else:
return f"""{organization}/{model_id}"""
def _lowercase( __a : Union[str, Any] , __a : Dict ):
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__a , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
a__ =args.hub_token if hasattr(__a , 'hub_token' ) else None
a__ =get_full_repo_name(__a , token=__a )
a__ =ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__a , model_name=__a , repo_name=__a , dataset_name=args.dataset_name if hasattr(__a , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__a , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(__a , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__a , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__a , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__a , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__a , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__a , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
a__ =os.path.join(args.output_dir , 'README.md' )
model_card.save(__a )
def _lowercase( __a : Optional[str] , __a : Optional[str] = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
a__ =str(Path(__a ).as_posix() )
a__ =re.search(r'snapshots/([^/]+)/' , __a )
if search is None:
return None
a__ =search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None
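# Hedged worked example for the helper above (path and hash are hypothetical):
# given '.../models--some-org--some-model/snapshots/<40-char-hex>/unet/config.json',
# the regex captures '<40-char-hex>', which is returned iff REGEX_COMMIT_HASH matches it.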
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase: List[str] = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
_lowerCAmelCase: List[str] = os.path.join(hf_cache_home, 'diffusers')
def _lowercase( __a : Optional[str] = None , __a : Optional[str] = None ):
if new_cache_dir is None:
a__ =DIFFUSERS_CACHE
if old_cache_dir is None:
a__ =old_diffusers_cache
a__ =Path(__a ).expanduser()
a__ =Path(__a ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
a__ =new_cache_dir / old_blob_path.relative_to(__a )
new_blob_path.parent.mkdir(parents=__a , exist_ok=__a )
os.replace(__a , __a )
try:
os.symlink(__a , __a )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase: Dict = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
_lowerCAmelCase: int = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase: List[Any] = int(f.read())
except ValueError:
_lowerCAmelCase: Any = 0
if cache_version < 1:
_lowerCAmelCase: str = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase: Optional[Any] = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _lowercase( __a : str , __a : Optional[str] = None ):
if variant is not None:
a__ =weights_name.split('.' )
a__ =splits[:-1] + [variant] + splits[-1:]
a__ ='.'.join(__a )
return weights_name
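# Worked example for the variant helper above (its name is mangled in this dump):
# weights_name='diffusion_pytorch_model.bin', variant='fp16'
# -> splits ['diffusion_pytorch_model', 'bin'] -> 'diffusion_pytorch_model.fp16.bin'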
def _lowercase( __a : Union[str, Any] , *,
__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : int=None , ):
a__ =str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
a__ =os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
a__ =os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('0.20.0' )
):
try:
a__ =hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
a__ =hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase = logging.get_logger(__name__)
class __snake_case( lowercase__ , lowercase__ ):
'''simple docstring'''
UpperCAmelCase : List[str] = "maskformer-swin"
UpperCAmelCase : Union[str, Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , A_=224 , A_=4 , A_=3 , A_=96 , A_=[2, 2, 6, 2] , A_=[3, 6, 12, 24] , A_=7 , A_=4.0 , A_=True , A_=0.0 , A_=0.0 , A_=0.1 , A_="gelu" , A_=False , A_=0.0_2 , A_=1e-5 , A_=None , A_=None , **A_ , ) -> Union[str, Any]:
super().__init__(**lowercase_ )
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = len(lowercase_ )
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
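        # e.g. with the defaults above: embed_dim=96, depths=[2, 2, 6, 2]
        # -> hidden_size = 96 * 2 ** (4 - 1) = 768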
lowerCAmelCase = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase, lowerCAmelCase = get_aligned_output_features_output_indices(
            out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
 | 433 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def _lowercase( __a : int , __a : str , __a : LevitConfig , __a : Path , __a : bool = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
a__ =timm.create_model('levit_128s' , pretrained=__a )
else:
a__ =timm.create_model('levit_128' , pretrained=__a )
if hidden_sizes == 192:
a__ =timm.create_model('levit_192' , pretrained=__a )
if hidden_sizes == 256:
a__ =timm.create_model('levit_256' , pretrained=__a )
if hidden_sizes == 384:
a__ =timm.create_model('levit_384' , pretrained=__a )
from_model.eval()
a__ =LevitForImageClassificationWithTeacher(__a ).eval()
a__ =OrderedDict()
a__ =from_model.state_dict()
a__ =list(from_model.state_dict().keys() )
a__ =list(our_model.state_dict().keys() )
print(len(__a ) , len(__a ) )
for i in range(len(__a ) ):
a__ =weights[og_keys[i]]
our_model.load_state_dict(__a )
a__ =torch.randn((2, 3, 224, 224) )
a__ =from_model(__a )
a__ =our_model(__a ).logits
assert torch.allclose(__a , __a ), "The model logits don't match the original one."
a__ =name
print(__a )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
a__ =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _lowercase( __a : Path , __a : str = None , __a : bool = True ):
a__ ='imagenet-1k-id2label.json'
a__ =1000
a__ =(1, num_labels)
a__ ='huggingface/label-files'
a__ =num_labels
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
a__ =partial(__a , num_labels=__a , idalabel=__a , labelaid=__a )
a__ ={
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
a__ ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __a , names_to_config[model_name] , __a , __a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __a , __a , __a , __a )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
_lowerCAmelCase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
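# Hedged invocation sketch (the script filename is hypothetical; 'levit-128S' is one
# of the names_to_hidden_sizes keys defined above):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/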
| 20 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
lowercase : Optional[Any] = TypeVar("""T""")
class __snake_case ( Generic[T] ):
def __init__( self ,snake_case ):
'''simple docstring'''
lowercase : List[str] = data
lowercase : List[str] = self
lowercase : Any = 0
class __snake_case ( Generic[T] ):
def __init__( self ):
'''simple docstring'''
lowercase : List[str] = {}
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : List[str] = DisjointSetTreeNode(lowercase_ )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : List[str] = self.map[data]
if elem_ref != elem_ref.parent:
lowercase : Optional[Any] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
if nodea.rank > nodea.rank:
lowercase : Optional[int] = nodea
else:
lowercase : str = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
self.link(self.find_set(lowercase_ ) ,self.find_set(lowercase_ ) )
class __snake_case ( Generic[T] ):
def __init__( self ):
'''simple docstring'''
lowercase : Dict = {}
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
if node not in self.connections:
lowercase : List[str] = {}
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
self.add_node(lowercase_ )
self.add_node(lowercase_ )
lowercase : Union[str, Any] = weight
lowercase : Optional[int] = weight
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = []
lowercase : Optional[Any] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
# creating the disjoint set
lowercase : Tuple = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowercase_ )
# MST generation
lowercase : Tuple = 0
lowercase : List[Any] = 0
lowercase : Tuple = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
lowercase , lowercase , lowercase : Tuple = edges[index]
index += 1
lowercase : Optional[int] = disjoint_set.find_set(lowercase_ )
lowercase : Optional[int] = disjoint_set.find_set(lowercase_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowercase_ ,lowercase_ ,lowercase_ )
disjoint_set.union(lowercase_ ,lowercase_ )
return graph
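# Hedged usage sketch. The class definitions above are mangled, but their own
# references name them DisjointSetTree / GraphUndirectedWeighted; the method names
# below (add_edge, kruskal) are hypothetical restorations of the mangled ones.
#
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 10)
#   mst = g.kruskal()  # keeps the weight-1 and weight-2 edges, drops the weight-10 edge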
| 336 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowerCAmelCase: int = logging.get_logger(__name__)
_lowerCAmelCase: Union[str, Any] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
_lowerCAmelCase: Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _lowercase( __a : Optional[Any] ):
a__ ={}
with open(__a , 'r' ) as file:
for line_number, line in enumerate(__a ):
a__ =line.strip()
if line:
a__ =line.split()
a__ =line_number
a__ =words[0]
a__ =value
return result
def _lowercase( __a : Dict , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : str ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ =getattr(__a , __a ).shape
elif weight_type is not None and weight_type == "param":
a__ =hf_pointer
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =shape_pointer.shape
# let's reduce dimension
a__ =value[0]
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =value
else:
a__ =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _lowercase( __a : Optional[int] , __a : int , __a : Optional[int] , __a : Optional[Any] , __a : List[Any] ):
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ ='.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a__ ='.'.join([key, hf_param_name] )
else:
a__ =key
a__ =value if 'lm_head' in full_key else value[0]
_lowerCAmelCase: Dict = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase( __a : Dict , __a : int , __a : int=None , __a : List[str]=None ):
a__ =False
for key, mapped_key in MAPPING.items():
a__ ='wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
else:
a__ =None
if hf_dict is not None:
rename_dict(__a , __a , __a , __a , __a )
else:
set_recursively(__a , __a , __a , __a , __a )
return is_used
return is_used
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
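# Naming scheme assumed by load_conv_layer (inferred from the parsing above): a fairseq
# key such as "conv_layers.0.2.weight" splits into layer_id=0 and type_id=2, where
# type_id 0 addresses the convolution itself and type_id 2 its group/layer norm.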
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
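# Example invocation (illustrative; the paths are hypothetical, the flags mirror the
# argparse setup above):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h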
| 20 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
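# Usage sketch of the lazy-module pattern above (illustrative): the heavy torch import is
# deferred until an attribute is first resolved, e.g.
#   from transformers.models.squeezebert import SqueezeBertModel  # triggers the modeling import only now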
| 372 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase_ (unittest.TestCase ):
@slow
def __UpperCamelCase ( self) -> Optional[int]:
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there' , return_tensors='pt').input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt').input_ids
        loss = model(input_ids.to(torch_device) , labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
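    # Note on the expected score (added for clarity; not part of the test): `loss` is the
    # mean cross-entropy over the label tokens, so labels.shape[-1] * loss recovers the
    # summed negative log-likelihood, and -84.9127 is that total for "Hi I am".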
| 20 | 0 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : List[str] = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCAmelCase_ : List[Any] = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCAmelCase_ : int = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'
if is_torch_available():
UpperCAmelCase_ : str = 'pt'
elif is_tf_available():
UpperCAmelCase_ : Optional[int] = 'tf'
else:
UpperCAmelCase_ : List[Any] = 'jax'
@require_sentencepiece
class lowercase__ ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = MarianTokenizer
A_ : Tuple = False
A_ : int = True
def UpperCAmelCase_ ( self ):
super().setUp()
_SCREAMING_SNAKE_CASE : List[str] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
_SCREAMING_SNAKE_CASE : Tuple = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_SCREAMING_SNAKE_CASE : List[Any] = Path(self.tmpdirname )
save_json(lowercase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowercase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowercase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowercase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
_SCREAMING_SNAKE_CASE : List[str] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self , **__snake_case ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCAmelCase_ ( self , __snake_case ):
return (
"This is a test",
"This is a test",
)
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Tuple = """</s>"""
_SCREAMING_SNAKE_CASE : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowercase_ ) , 9 )
def UpperCAmelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Optional[int] = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
_SCREAMING_SNAKE_CASE : int = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
_SCREAMING_SNAKE_CASE : str = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(lowercase_ , batch.input_ids[0] )
_SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowercase_ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [x.name for x in Path(lowercase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , lowercase_ )
MarianTokenizer.from_pretrained(lowercase_ )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Optional[int] = tok(
["""I am a small frog""" * 1000, """I am a small frog"""] , padding=lowercase_ , truncation=lowercase_ , return_tensors=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase_ , return_tensors=lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def UpperCAmelCase_ ( self ):
# fmt: off
_SCREAMING_SNAKE_CASE : List[str] = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
_SCREAMING_SNAKE_CASE : List[Any] = """Tämä on testi"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """This is a test"""
_SCREAMING_SNAKE_CASE : int = [76, 7, 2047, 2]
_SCREAMING_SNAKE_CASE : str = [69, 12, 11, 940, 2]
_SCREAMING_SNAKE_CASE : str = tokenizer(lowercase_ ).input_ids
self.assertListEqual(lowercase_ , lowercase_ )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer(text_target=lowercase_ ).input_ids
self.assertListEqual(lowercase_ , lowercase_ )
_SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
| 533 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase_ (unittest.TestCase ):
def __UpperCamelCase ( self) -> int:
a__ =tempfile.mkdtemp()
a__ =BlipImageProcessor()
a__ =BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
a__ =BlipProcessor(lowercase_ , lowercase_)
processor.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self , **lowercase_) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).tokenizer
def __UpperCamelCase ( self , **lowercase_) -> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_).image_processor
def __UpperCamelCase ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def __UpperCamelCase ( self) -> str:
a__ =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
a__ =[Image.fromarray(np.moveaxis(lowercase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self) -> str:
a__ =BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
a__ =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
a__ =self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0)
a__ =BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowercase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowercase_)
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =self.prepare_image_inputs()
a__ =image_processor(lowercase_ , return_tensors='np')
a__ =processor(images=lowercase_ , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def __UpperCamelCase ( self) -> List[str]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =processor(text=lowercase_)
a__ =tokenizer(lowercase_ , return_token_type_ids=lowercase_)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __UpperCamelCase ( self) -> int:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(lowercase_):
processor()
def __UpperCamelCase ( self) -> Tuple:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ =processor.batch_decode(lowercase_)
a__ =tokenizer.batch_decode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCamelCase ( self) -> List[Any]:
a__ =self.get_image_processor()
a__ =self.get_tokenizer()
a__ =BlipProcessor(tokenizer=lowercase_ , image_processor=lowercase_)
a__ ='lower newer'
a__ =self.prepare_image_inputs()
a__ =processor(text=lowercase_ , images=lowercase_)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 0 |
'''simple docstring'''
from PIL import Image
def change_contrast( img: Image , level: int ) -> Image:
    """simple docstring"""
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int ) -> int:
        return int(128 + factor * (c - 128) )
    return img.point(contrast )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("image_data/lena_high_contrast.png", format="png")
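# Sanity check of the factor formula (illustrative): at level 0 the factor is
#   (259 * (0 + 255)) / (255 * (259 - 0)) == 1.0
# so contrast(c) == c and the image is unchanged; positive levels stretch pixel values
# away from the 128 midpoint, negative levels compress them toward it.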
| 430 |
def exchange_sort( numbers: list[int] ) -> list[int]:
    n = len(numbers )
    for i in range(n ):
        for j in range(i + 1 , n ):
            if numbers[j] < numbers[i]:
                numbers[j] , numbers[i] = numbers[i] , numbers[j]
    return numbers
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
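# Illustrative examples (added):
#   exchange_sort([5, 4, 3, 2, 1]) -> [1, 2, 3, 4, 5]
#   exchange_sort([-2, 0, -1])     -> [-2, -1, 0]
# The nested loops always run in full, so the sort is O(n^2) regardless of input order.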
| 20 | 0 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def liouville_lambda( number: int ) -> int:
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        raise ValueError("""Input must be a positive integer""" )
    return -1 if len(prime_factors(number ) ) % 2 else 1
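# Illustrative values (added): liouville_lambda(10) == 1, since 10 = 2 * 5 has an even
# number of prime factors, while liouville_lambda(11) == -1 for the single factor 11.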
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 585 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="resnet50" , lowercase_=3 , lowercase_=32 , lowercase_=3 , lowercase_=True , lowercase_=True , ) -> Union[str, Any]:
a__ =parent
a__ =out_indices if out_indices is not None else [4]
a__ =stage_names
a__ =out_features
a__ =backbone
a__ =batch_size
a__ =image_size
a__ =num_channels
a__ =use_pretrained_backbone
a__ =is_training
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ =self.get_config()
return config, pixel_values
def __UpperCamelCase ( self) -> Tuple:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> str:
a__ =TimmBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
a__ =model(lowercase_)
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __UpperCamelCase ( self) -> str:
a__ =self.prepare_config_and_inputs()
a__ , a__ =config_and_inputs
a__ ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase_ (lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
snake_case =(TimmBackbone,) if is_torch_available() else ()
snake_case ={'feature-extraction': TimmBackbone} if is_torch_available() else {}
snake_case =False
snake_case =False
snake_case =False
snake_case =False
def __UpperCamelCase ( self) -> Optional[Any]:
a__ =TimmBackboneModelTester(self)
a__ =ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_)
def __UpperCamelCase ( self) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self) -> str:
a__ ='resnet18'
a__ ='microsoft/resnet-18'
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_)
a__ =AutoBackbone.from_pretrained(lowercase_)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
a__ =AutoBackbone.from_pretrained(lowercase_ , use_timm_backbone=lowercase_ , out_indices=[1, 2, 3])
a__ =AutoBackbone.from_pretrained(lowercase_ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> Any:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> Dict:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def __UpperCamelCase ( self) -> List[Any]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> List[str]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def __UpperCamelCase ( self) -> Union[str, Any]:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def __UpperCamelCase ( self) -> int:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def __UpperCamelCase ( self) -> str:
pass
@unittest.skip('Safetensors is not supported by timm.')
def __UpperCamelCase ( self) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self) -> Optional[Any]:
pass
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
a__ =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ =[*signature.parameters.keys()]
a__ =['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def __UpperCamelCase ( self) -> Any:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
a__ =True
a__ =self.has_attentions
# no need to test all models as different heads yield the same functionality
a__ =self.all_model_classes[0]
a__ =model_class(lowercase_)
model.to(lowercase_)
a__ =self._prepare_for_class(lowercase_ , lowercase_)
a__ =model(**lowercase_)
a__ =outputs[0][-1]
# Encoder-/Decoder-only models
a__ =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a__ =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def __UpperCamelCase ( self) -> List[str]:
a__ , a__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
a__ =copy.deepcopy(lowercase_)
a__ =None
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
a__ =copy.deepcopy(lowercase_)
a__ =False
a__ =model_class(lowercase_)
model.to(lowercase_)
model.eval()
a__ =model(**lowercase_)
| 20 | 0 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( key: tuple[int, ...] , ciphertext: list[int] ) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars( ciphertext: list[int] ) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(key , ciphertext )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word( possibles: list[str] , common_word: str ) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution( filename: str = "p059_cipher.txt" ) -> int:
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding="""utf-8""" )
    ciphertext = [int(number ) for number in data.strip().split(""",""" )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F'{solution() = }')
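# XOR is self-inverse, which is what makes brute-forcing the three-letter key feasible.
# A quick round-trip illustration (hypothetical key and text, not the puzzle input):
#   key = (ord('a'), ord('b'), ord('c'))
#   cipher = [c ^ k for c, k in zip(b'hello world', cycle(key))]
#   assert try_key(key, cipher) == 'hello world'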
| 264 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 0 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter( input_str: str = "" , ) -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def can_string_be_rearranged_as_palindrome( input_str: str = "" ) -> bool:
    if len(input_str ) == 0:
        return True
    lower_case_input_str = input_str.replace(""" """ , """""" ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
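# Example (added): "momo twin" has character counts {'m': 2, 'o': 2, 't': 1, 'w': 1,
# 'i': 1, 'n': 1} -- four odd counts, so it cannot form a palindrome, while "momo"
# (zero odd counts) can, e.g. "ommo".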
def benchmark( input_str: str = "" ) -> None:
    print("""\nFor string = """ , input_str , """:""" )
    print(
        """> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(input_str ) , """\ttime =""" , timeit(
            """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
    print(
        """> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(input_str ) , """\ttime =""" , timeit(
            """z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
    check_str = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: '''
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 588 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase_ (lowercase__ ):
snake_case ='big_bird'
def __init__( self , lowercase_=50358 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=4096 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=66 , lowercase_="block_sparse" , lowercase_=True , lowercase_=False , lowercase_=64 , lowercase_=3 , lowercase_=None , **lowercase_ , ) -> Any:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
a__ =vocab_size
a__ =max_position_embeddings
a__ =hidden_size
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =intermediate_size
a__ =hidden_act
a__ =hidden_dropout_prob
a__ =attention_probs_dropout_prob
a__ =initializer_range
a__ =type_vocab_size
a__ =layer_norm_eps
a__ =use_cache
a__ =rescale_embeddings
a__ =attention_type
a__ =use_bias
a__ =block_size
a__ =num_random_blocks
a__ =classifier_dropout
class lowercase_ (lowercase__ ):
@property
def __UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
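# Usage sketch (illustrative; upstream this configuration class is BigBirdConfig, and the
# defaults come from __init__ above):
#   config = BigBirdConfig()                  # block_sparse attention, block_size=64
#   config.attention_type = 'original_full'   # switch to full attention for short inputs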
| 20 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowercase ( lowercase__ ):
snake_case_ = """dandelin/vilt-b32-finetuned-vqa"""
snake_case_ = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
snake_case_ = """image_qa"""
snake_case_ = AutoProcessor
snake_case_ = AutoModelForVisualQuestionAnswering
snake_case_ = ["""image""", """text"""]
snake_case_ = ["""text"""]
def __init__( self : Optional[int] ,*A : int ,**A : List[Any] ):
'''simple docstring'''
requires_backends(self ,["""vision"""] )
super().__init__(*lowercase_ ,**lowercase_ )
def __lowercase ( self : int ,A : Any ,A : List[Any] ):
'''simple docstring'''
return self.pre_processor(lowercase_ ,lowercase_ ,return_tensors="""pt""" )
def __lowercase ( self : Tuple ,A : Tuple ):
'''simple docstring'''
with torch.no_grad():
return self.model(**lowercase_ ).logits
def __lowercase ( self : str ,A : str ):
'''simple docstring'''
        idx = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
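# Hypothetical usage sketch (the public name of the tool class above is assumed):
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(image=Image.open('photo.jpg'), question='How many dogs are there?')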
| 65 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output( swiftformer_name ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict ):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            ls = k_new.split('.' )
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                k_new = k_new.replace('network' , 'swiftformer.encoder.network' )
        rename_keys.append((k, k_new) )
    return rename_keys
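# Illustration of the renaming above on a typical original key (format assumed):
#   "network.1.0.attn.Proj.weight" -> "swiftformer.encoder.network.1.blocks.0.attn.proj.weight"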
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location='cpu' , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location='cpu' )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config' )
    inputs = processor(images=image , return_tensors='pt' )
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs['pixel_values'] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 0 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ):
debug_launcher(test_script.main )
def UpperCamelCase( self ):
debug_launcher(test_ops.main )
| 672 |
from __future__ import annotations
from typing import Any
class Graph :
    def __init__( self , num_of_nodes ) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge( self , u_node , v_node , weight ) -> None:
        self.m_edges.append([u_node, v_node, weight])
    def find_component( self , u_node ) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component( self , u_node ) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union( self , component_size , u_node , v_node ) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka( self ) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component)
                        print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
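# Minimal usage sketch for the Graph class above (illustrative weights):
#   g = Graph(3)
#   g.add_edge(0, 1, 1); g.add_edge(1, 2, 2); g.add_edge(0, 2, 3)
#   g.boruvka()   # adds edges [0-1] and [1-2]; total MST weight printed is 3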
| 20 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _lowercase :
def __init__( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int]=1_4 , lowerCamelCase__ : str=7 , lowerCamelCase__ : Any=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Dict=False , lowerCamelCase__ : str=True , lowerCamelCase__ : Optional[Any]=9_9 , lowerCamelCase__ : str=3_2 , lowerCamelCase__ : str=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[Any]=3_7 , lowerCamelCase__ : int="gelu" , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Optional[int]=5_1_2 , lowerCamelCase__ : Dict=0.02 , ) -> int:
"""simple docstring"""
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = rotary_dim
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = None
A_ = vocab_size - 1
A_ = vocab_size - 1
A_ = vocab_size - 1
def UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
A_ = self.prepare_config_and_inputs()
A_ ,A_ ,A_ = config_and_inputs
A_ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCamelCase ( self : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str ) -> Any:
"""simple docstring"""
A_ = 2_0
A_ = model_class_name(lowercase_ )
A_ = model.init_cache(input_ids.shape[0] , lowercase_ )
A_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
A_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
A_ = model(
input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , )
A_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model(
input_ids[:, -1:] , attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowercase_ , )
A_ = model(lowercase_ )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def UpperCamelCase ( self : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] ) -> str:
"""simple docstring"""
A_ = 2_0
A_ = model_class_name(lowercase_ )
A_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
A_ = model.init_cache(input_ids.shape[0] , lowercase_ )
A_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
A_ = model(
input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , )
A_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowercase_ , position_ids=lowercase_ , )
A_ = model(lowercase_ , attention_mask=lowercase_ )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
@require_flax
class _lowercase ( lowercase__,lowercase__,unittest.TestCase ):
_lowercase : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
_lowercase : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
A_ = FlaxGPTJModelTester(self )
def UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
A_ ,A_ ,A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
A_ ,A_ ,A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
@tooslow
def UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
A_ = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
A_ = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=lowercase_ , truncation=lowercase_ )
A_ = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
A_ = False
A_ = model.config.eos_token_id
A_ = jax.jit(model.generate )
A_ = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
A_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
A_ = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(lowercase_ , lowercase_ )
@is_pt_flax_cross_test
def UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A_ = self._prepare_for_class(lowercase_ , lowercase_ )
A_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ = getattr(lowercase_ , lowercase_ )
A_ ,A_ = pt_inputs['''input_ids'''].shape
A_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowercase_ ):
A_ = 0
A_ = 1
A_ = 0
A_ = 1
A_ = pt_model_class(lowercase_ ).eval()
A_ = model_class(lowercase_ , dtype=jnp.floataa )
A_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ )
A_ = fx_state
with torch.no_grad():
A_ = pt_model(**lowercase_ ).to_tuple()
A_ = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(lowercase_ , lowercase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase_ )
A_ = model_class.from_pretrained(lowercase_ , from_pt=lowercase_ )
A_ = fx_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(
len(lowercase_ ) , len(lowercase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(lowercase_ , lowercase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A_ = self._prepare_for_class(lowercase_ , lowercase_ )
A_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A_ = getattr(lowercase_ , lowercase_ )
A_ = pt_model_class(lowercase_ ).eval()
A_ = model_class(lowercase_ , dtype=jnp.floataa )
A_ = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params )
A_ ,A_ = pt_inputs['''input_ids'''].shape
A_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowercase_ ):
A_ = 0
A_ = 1
A_ = 0
A_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A_ = pt_model(**lowercase_ ).to_tuple()
A_ = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(lowercase_ , lowercase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase_ )
A_ = pt_model_class.from_pretrained(lowercase_ , from_flax=lowercase_ )
with torch.no_grad():
A_ = pt_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(
len(lowercase_ ) , len(lowercase_ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(lowercase_ , lowercase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
A_ = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
A_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase_ )
| 203 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_lowerCAmelCase: List[Any] = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
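# Usage sketch (an addition, not part of the original metric script): with
# use_aggregator=False the metric skips bootstrap aggregation and returns one
# score per prediction/reference pair instead of an AggregateScore:
#
#     rouge = datasets.load_metric("rouge")
#     results = rouge.compute(
#         predictions=["hello there", "general kenobi"],
#         references=["hello there", "general kenobi"],
#         use_aggregator=False,
#     )
#     len(results["rouge1"])  # -> 2, one Score per input pair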
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
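# Illustrative output sketch (an addition, not part of the original script).
# For the word tuple above, the prefix "de" matches four words in insertion
# order; exact matches keep the trailing space emitted by `_elements`:
#
#     autocomplete_using_trie("de")
#     # -> ('depart ', 'detergent ', 'deer ', 'deal ')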
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
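# Worked example sketch (an addition, not part of the original script),
# stepping the shift arithmetic above by hand with key "HELLO":
#
#     encrypt_message("HELLO", "attack")   # -> "hxelqr"
#     decrypt_message("HELLO", "hxelqr")   # -> "attack"
#
# Non-letter symbols pass through unchanged and do not advance key_index, so
# spaces and punctuation never consume key characters.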
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
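# Usage sketch (an addition for illustration), assuming this file is the
# package __init__ at transformers/models/swiftformer/. Imports resolve
# normally, but the heavy modeling module is only loaded when one of its
# symbols is first accessed:
#
#     from transformers.models.swiftformer import SwiftFormerConfig  # cheap
#     config = SwiftFormerConfig()  # torch-backed modeling code still untouched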
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
        T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
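# Usage sketch (an addition for illustration): the defaults above mirror the
# published GLPN checkpoints, and any field can be overridden by keyword:
#
#     config = GLPNConfig(decoder_hidden_size=128)
#     assert config.num_encoder_blocks == 4
#     assert config.decoder_hidden_size == 128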
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14)
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0)
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
'''simple docstring'''
import math
def prime_sieve(n: int) -> list[int]:
    """Return all primes below n via an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """
    Project Euler 234: sum the semidivisible numbers not exceeding limit.
    A number n is semidivisible when exactly one of lps(n) and ups(n) divides
    it, where lps/ups are the largest prime below and smallest prime above
    sqrt(n).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
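# Sanity-check sketch (an addition): the Project Euler 234 statement gives the
# sum of semidivisible numbers up to 1000 as 34825, which makes a cheap
# regression test for the windowed summation above:
#
#     assert solution(1000) == 34825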
from manim import *
class Stage3(Scene):
    # NOTE: the class name and local variable names below are reconstructed for
    # readability; the obfuscated source did not preserve them.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=18
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""", font_size=24
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pix2struct-textcaps-base': (
        'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=50244, hidden_size=768, d_kv=64, d_ff=2048, num_layers=12, num_heads=12, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, dense_act_fn="gelu_new", decoder_start_token_id=0, use_cache=False, pad_token_id=0, eos_token_id=1, tie_word_embeddings=False, is_decoder=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(self, hidden_size=768, patch_embed_hidden_size=768, d_ff=2048, d_kv=64, num_hidden_layers=12, num_attention_heads=12, dense_act_fn="gelu_new", layer_norm_eps=1e-6, dropout_rate=0.0, attention_dropout=0.0, initializer_range=1e-10, initializer_factor=1.0, seq_len=4096, relative_attention_num_buckets=32, relative_attention_max_distance=128, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
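# Usage sketch (an addition for illustration): composing a full config from
# the two sub-configs, mirroring from_text_vision_configs above:
#
#     text_config = Pix2StructTextConfig(num_layers=2)
#     vision_config = Pix2StructVisionConfig(num_hidden_layers=2)
#     config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.text_config.num_layers == 2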
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: "int"
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])


class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42])
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42])
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # the source called parse_yaml_file on this JSON payload;
            # parse_json_file is the matching HfArgumentParser API
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
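# Usage sketch (an addition for illustration): the pattern these tests
# exercise, parsing CLI-style arguments straight into a dataclass:
#
#     parser = HfArgumentParser(BasicExample)
#     (example,) = parser.parse_args_into_dataclasses(
#         ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#     )
#     assert example.foo == 1 and example.flag is True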
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
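# Usage sketch (an addition for illustration): outside the test harness the
# processor resizes any input so both spatial dims become multiples of
# size_divisor (32 by default):
#
#     image_processor = GLPNImageProcessor()
#     pixel_values = image_processor(Image.new("RGB", (250, 130)), return_tensors="pt").pixel_values
#     assert pixel_values.shape[-1] % 32 == 0 and pixel_values.shape[-2] % 32 == 0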
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}


class AutoformerConfig(PretrainedConfig):
    model_type = 'autoformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling=True, num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=32, decoder_ffn_dim=32, activation_function="gelu", dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, is_encoder_decoder=True, label_length=10, moving_average=25, autocorrelation_factor=3, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
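# Usage sketch (an addition for illustration): a minimal forecasting config;
# feature_size is derived from input_size, lags_sequence and
# _number_of_features rather than set directly:
#
#     config = AutoformerConfig(prediction_length=24)
#     assert config.context_length == 24  # defaults to prediction_length
#     assert config.feature_size == 1 * len(config.lags_sequence) + config._number_of_features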
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class __UpperCamelCase (Generic[T, U] ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = key
lowercase = val
lowercase = None
lowercase = None
def __repr__( self ) -> str:
'''simple docstring'''
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class __UpperCamelCase (Generic[T, U] ):
def __init__( self ) -> None:
'''simple docstring'''
lowercase = DoubleLinkedListNode(lowercase_ , lowercase_ )
lowercase = DoubleLinkedListNode(lowercase_ , lowercase_ )
lowercase , lowercase = self.rear, self.head
def __repr__( self ) -> str:
'''simple docstring'''
lowercase = ["""DoubleLinkedList"""]
lowercase = self.head
while node.next is not None:
rep.append(str(lowercase_ ) )
lowercase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowercase_ )
def _a ( self , _lowerCAmelCase ) -> None:
'''simple docstring'''
lowercase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowercase = node
lowercase = previous
lowercase = node
lowercase = self.rear
def _a ( self , _lowerCAmelCase ) -> DoubleLinkedListNode[T, U] | None:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
lowercase = node.next
lowercase = node.prev
lowercase = None
lowercase = None
return node
class __UpperCamelCase (Generic[T, U] ):
__A = {}
def __init__( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase = DoubleLinkedList()
lowercase = capacity
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = {}
def __repr__( self ) -> str:
'''simple docstring'''
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , _lowerCAmelCase ) -> bool:
'''simple docstring'''
return key in self.cache
def _a ( self , _lowerCAmelCase ) -> U | None:
'''simple docstring'''
if key in self.cache:
self.hits += 1
lowercase = self.cache[key]
lowercase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase_ )
return node.val
self.miss += 1
return None
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> None:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowercase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase_ ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
lowercase = DoubleLinkedListNode(lowercase_ , lowercase_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowercase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
lowercase = value
self.list.add(lowercase_ )
@classmethod
def _a ( cls , _lowerCAmelCase = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
'''simple docstring'''
def cache_decorator_inner(_lowerCAmelCase ) -> Callable[..., U]:
def cache_decorator_wrapper(*_lowerCAmelCase ) -> U:
if func not in cls.decorator_function_to_instance_map:
lowercase = LRUCache(lowercase_ )
lowercase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
lowercase = func(*lowercase_ )
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase_ , """cache_info""" , lowercase_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
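    # Usage sketch (illustrative): memoise a one-argument function with the
    # class-level decorator defined above.
    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)
    for i in range(1, 50):
        fib(i)
    print(fib.cache_info())  # prints hit/miss statistics of the shared cache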
| 588 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowerCAmelCase: List[Any] = logging.get_logger(__name__)
class lowercase_ (BaseImageProcessor):
    model_input_names = ['pixel_values']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
a__ =size if size is not None else {'shortest_edge': 256}
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =crop_size if crop_size is not None else {'height': 224, 'width': 224}
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_resize
a__ =size
a__ =resample
a__ =do_center_crop
a__ =crop_size
a__ =do_rescale
a__ =rescale_factor
a__ =do_normalize
a__ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a__ =get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_)
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
a__ =get_size_dict(lowercase_)
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> Tuple:
a__ =do_resize if do_resize is not None else self.do_resize
a__ =size if size is not None else self.size
a__ =get_size_dict(lowercase_ , default_to_square=lowercase_)
a__ =resample if resample is not None else self.resample
a__ =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ =crop_size if crop_size is not None else self.crop_size
a__ =get_size_dict(lowercase_ , param_name='crop_size')
a__ =do_rescale if do_rescale is not None else self.do_rescale
a__ =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ =do_normalize if do_normalize is not None else self.do_normalize
a__ =image_mean if image_mean is not None else self.image_mean
a__ =image_std if image_std is not None else self.image_std
a__ =make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a__ =[to_numpy_array(lowercase_) for image in images]
if do_resize:
a__ =[self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_) for image in images]
if do_center_crop:
a__ =[self.center_crop(image=lowercase_ , size=lowercase_) for image in images]
if do_rescale:
a__ =[self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_normalize:
a__ =[self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images]
a__ =[to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
a__ ={'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into per-pixel semantic segmentation maps."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
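# Standalone sketch (shapes are illustrative): the core of the post-processing
# above is bilinear upsampling followed by an argmax over the class dimension.
if __name__ == "__main__":
    import torch as _torch
    _logits = _torch.randn(2, 21, 56, 56)  # (batch, num_labels, h, w)
    _resized = _torch.nn.functional.interpolate(
        _logits, size=(224, 224), mode='bilinear', align_corners=False)
    _seg_maps = _resized.argmax(dim=1)  # (batch, 224, 224) label ids
    assert _seg_maps.shape == (2, 224, 224)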
| 20 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
config_args.add_argument(
"""--config_file""" , type=__a , default=__a , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=__a , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=__a , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=__a , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
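# For reference, the assembled invocation looks like (hypothetical values):
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central2-b \
#       --command "cd /usr/share; pip install accelerate -U; accelerate launch train.py" \
#       --worker all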
| 65 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class _PatchedModuleObj:
    '''Set all the module's components as attributes of the _PatchedModuleObj object.'''
    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__'):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    '''Patch a submodule attribute of an object, keeping all other submodules intact.'''
    _active_patches = []
    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.')[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split('.')
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('.'.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))
    def start(self):
        '''Activate a patch.'''
        self.__enter__()
        self._active_patches.append(self)
    def stop(self):
        '''Stop an active patch.'''
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
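if __name__ == "__main__":
    # Standalone illustration of the same pattern (simplified; not the class
    # above): swap a module attribute for the duration of a block, then
    # restore it.
    import contextlib
    import os

    @contextlib.contextmanager
    def _tmp_patch(obj, name, new):
        old = getattr(obj, name)
        setattr(obj, name, new)
        try:
            yield
        finally:
            setattr(obj, name, old)

    with _tmp_patch(os.path, "join", lambda *p: "PATCHED/" + "/".join(p)):
        assert os.path.join("a", "b") == "PATCHED/a/b"
    assert os.path.join("a", "b") == "a" + os.sep + "b"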
| 20 | 0 |
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to max_images Google Images results for query; return the count."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
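# Side note (illustration): the double unicode-escape decode in the loop above
# turns doubly JSON-escaped text back into plain URL characters, e.g.
#   >>> bytes("https://example.com/a\\u0026b", "ascii").decode("unicode-escape")
#   'https://example.com/a&b'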
| 672 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase: int = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"""could not parse string as bool {string}""")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
_lowerCAmelCase: str = parser.parse_args()
_lowerCAmelCase: Tuple = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
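    # Why the custom parser above: argparse's built-in type=bool treats any
    # non-empty string as True, so "--use_linear_projection False" would come
    # back True. Illustration:
    #   >>> import argparse
    #   >>> p = argparse.ArgumentParser()
    #   >>> _ = p.add_argument('--flag', type=bool)
    #   >>> p.parse_args(['--flag', 'False']).flag
    #   True
    # parse_bool('False') returns False instead.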
| 20 | 0 |
def climbing_stairs(number_of_steps: int) -> int:
    '''Number of distinct ways to climb number_of_steps steps taking 1 or 2 at a time.'''
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be a positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
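    # Illustrative check: the counts follow the Fibonacci sequence.
    assert [climbing_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]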
| 203 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.')
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en',
            license='apache-2.0',
            library_name='diffusers',
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
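# Illustration: _add_variant('diffusion_pytorch_model.bin', 'fp16') returns
# 'diffusion_pytorch_model.fp16.bin' -- the variant tag lands just before the
# file suffix.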
def _lowercase( __a : Union[str, Any] , *,
__a : Optional[Any] , __a : Optional[Any] , __a : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : str , __a : int , __a : Tuple , __a : Union[str, Any] , __a : int=None , ):
a__ =str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a , __a ) ):
# Load from a PyTorch checkpoint
a__ =os.path.join(__a , __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a , __a , __a ) ):
a__ =os.path.join(__a , __a , __a )
return model_file
else:
raise EnvironmentError(
f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('0.20.0' )
):
try:
a__ =hf_hub_download(
__a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
warnings.warn(
f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __a , )
return model_file
except: # noqa: E722
warnings.warn(
f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added.""" , __a , )
try:
# 2. Load model file as usual
a__ =hf_hub_download(
__a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
f""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
f"""containing a file named {weights_name}""" )
| 20 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
def __snake_case ( self , A_=0 , **A_ ) -> List[Any]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop("""num_inference_steps""" , lowercase_ )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**lowercase_ )
lowerCAmelCase = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
lowerCAmelCase = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case ( self ) -> Tuple:
pass
def __snake_case ( self , A_=0 , **A_ ) -> str:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop("""num_inference_steps""" , lowercase_ )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
lowerCAmelCase = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case ( self , **A_ ) -> int:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(**lowercase_ )
lowerCAmelCase = scheduler_class(**lowercase_ )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
lowerCAmelCase = model(lowercase_ , lowercase_ )
lowerCAmelCase = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
lowerCAmelCase = model(lowercase_ , lowercase_ )
lowerCAmelCase = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def __snake_case ( self ) -> Dict:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop("""num_inference_steps""" , lowercase_ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**lowercase_ )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , """set_timesteps""" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , """set_timesteps""" ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
lowerCAmelCase = dummy_past_residuals[:]
lowerCAmelCase = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
lowerCAmelCase = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case ( self ) -> str:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def __snake_case ( self ) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
lowerCAmelCase = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def __snake_case ( self ) -> List[str]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def __snake_case ( self ) -> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def __snake_case ( self ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def __snake_case ( self ) -> Dict:
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def __snake_case ( self ) -> Optional[Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def __snake_case ( self ) -> Union[str, Any]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
lowerCAmelCase = 27
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
lowerCAmelCase = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def __snake_case ( self ) -> Dict:
with self.assertRaises(lowercase_ ):
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __snake_case ( self ) -> int:
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.sum(torch.abs(lowercase_ ) )
lowerCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __snake_case ( self ) -> Any:
lowerCAmelCase = self.full_loop(prediction_type="""v_prediction""" )
lowerCAmelCase = torch.sum(torch.abs(lowercase_ ) )
lowerCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __snake_case ( self ) -> Tuple:
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.0_1 )
lowerCAmelCase = torch.sum(torch.abs(lowercase_ ) )
lowerCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __snake_case ( self ) -> List[str]:
# We specify different beta, so that the first alpha is 0.99
lowerCAmelCase = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.0_1 )
lowerCAmelCase = torch.sum(torch.abs(lowercase_ ) )
lowerCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
 | 433 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
a__ =timm.create_model('levit_128s' , pretrained=__a )
else:
a__ =timm.create_model('levit_128' , pretrained=__a )
if hidden_sizes == 192:
a__ =timm.create_model('levit_192' , pretrained=__a )
if hidden_sizes == 256:
a__ =timm.create_model('levit_256' , pretrained=__a )
if hidden_sizes == 384:
a__ =timm.create_model('levit_384' , pretrained=__a )
from_model.eval()
a__ =LevitForImageClassificationWithTeacher(__a ).eval()
a__ =OrderedDict()
a__ =from_model.state_dict()
a__ =list(from_model.state_dict().keys() )
a__ =list(our_model.state_dict().keys() )
print(len(__a ) , len(__a ) )
for i in range(len(__a ) ):
a__ =weights[og_keys[i]]
our_model.load_state_dict(__a )
a__ =torch.randn((2, 3, 224, 224) )
a__ =from_model(__a )
a__ =our_model(__a ).logits
assert torch.allclose(__a , __a ), "The model logits don't match the original one."
a__ =name
print(__a )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
a__ =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __a , names_to_config[model_name] , __a , __a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __a , __a , __a , __a )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
_lowerCAmelCase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 20 | 0 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    '''Capitalise the first character of a sentence, leaving the rest unchanged.'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
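    # Illustrative checks of the behaviour above:
    assert capitalize("hello world") == "Hello world"
    assert capitalize("123 hello world") == "123 hello world"
    assert capitalize("") == ""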
| 336 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def _lowercase( __a : Dict , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : str ):
for attribute in key.split('.' ):
a__ =getattr(__a , __a )
a__ =None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
a__ =PARAM_MAPPING[full_name.split('.' )[-1]]
a__ ='param'
if weight_type is not None and weight_type != "param":
a__ =getattr(__a , __a ).shape
elif weight_type is not None and weight_type == "param":
a__ =hf_pointer
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =shape_pointer.shape
# let's reduce dimension
a__ =value[0]
else:
a__ =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
a__ =getattr(__a , __a )
a__ =value
else:
a__ =value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'
    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key
    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _lowercase( __a : Dict , __a : int , __a : int=None , __a : List[str]=None ):
a__ =False
for key, mapped_key in MAPPING.items():
a__ ='wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(__a )[0].split('.' )[-2]
a__ =mapped_key.replace('*' , __a )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
else:
a__ =None
if hf_dict is not None:
rename_dict(__a , __a , __a , __a , __a )
else:
set_recursively(__a , __a , __a , __a , __a )
return is_used
return is_used
def _lowercase( __a : Union[str, Any] , __a : List[str] , __a : Dict ):
a__ =[]
a__ =fairseq_model.state_dict()
a__ =hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a__ =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
a__ =True
else:
a__ =load_wavaveca_layer(__a , __a , __a )
if not is_used:
unused_weights.append(__a )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
a__ =full_name.split('conv_layers.' )[-1]
a__ =name.split('.' )
a__ =int(items[0] )
a__ =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
a__ =value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False):
if config_path is not None:
a__ =WavaVecaConfig.from_pretrained(__a )
else:
a__ =WavaVecaConfig()
if is_seq_class:
a__ =read_txt_into_dict(__a )
a__ =idalabel
a__ =WavaVecaForSequenceClassification(__a )
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
feature_extractor.save_pretrained(__a )
elif is_finetuned:
if dict_path:
a__ =Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ =target_dict.pad_index
a__ =target_dict.bos_index
a__ =target_dict.eos_index
a__ =len(target_dict.symbols )
a__ =os.path.join(__a , 'vocab.json' )
if not os.path.isdir(__a ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
a__ =target_dict.indices
# fairseq has the <pad> and <s> switched
a__ =0
a__ =1
with open(__a , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(__a , __a )
a__ =WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , )
a__ =True if config.feat_extract_norm == 'layer' else False
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
a__ =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
a__ =WavaVecaForCTC(__a )
else:
a__ =WavaVecaForPreTraining(__a )
if is_finetuned or is_seq_class:
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
a__ =argparse.Namespace(task='audio_pretraining' )
a__ =fairseq.tasks.setup_task(__a )
a__ , a__ , a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
a__ =model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 20 | 0 |
'''simple docstring'''
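# Project Euler problem 87 (https://projecteuler.net/problem=87): how many
# numbers below fifty million can be written as the sum of a prime square,
# a prime cube and a prime fourth power? (28 = 2**2 + 2**3 + 2**4 is the smallest.)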
def solution(limit: int = 50000000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers up to sqrt(limit - 24)
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 372 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
@slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 20 | 0 |
'''simple docstring'''
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix sums."""

    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("""Either arr or size must be specified""")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set the element at index to value (point assignment)."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Sum of the elements in [0, right)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Sum of the elements in [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Largest index i such that prefix(i) <= value, or -1 if none exists."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
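
# Illustrative usage of the tree as reconstructed above:
#   f = FenwickTree([1, 2, 3, 4, 5])
#   f.prefix(3)   # -> 6, the sum of the first three elements
#   f.add(0, 10)
#   f.get(0)      # -> 11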
if __name__ == "__main__":
import doctest
doctest.testmod()
| 533 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')

        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'input_ids', 'attention_mask'])
| 20 | 0 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = 'https://cdn.huggingface.co'
S3_BUCKET_PREFIX = 'https://s3.amazonaws.com/models.huggingface.co/bert'
PATH = '/'.join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.yaml'
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(',')[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(',')[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, 'rb') as f:
        ckp = pkl.load(f)['model']
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
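
# Note: load_checkpoint above expects a detectron2-style pickle whose "model"
# entry maps parameter names to numpy arrays; converting them to torch tensors
# makes the result loadable as an nn.Module state dict.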
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = 'root', level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d
def __repr__( self : Dict ):
"""simple docstring"""
return str(list((self._pointer.keys()) ) )
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split('.')[-1]] = val
        levels = key.split('.')
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), '.'.join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"""{file_name}""", 'w') as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"""{file_name}""", 'w') as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = '    '
        if self._name != "root":
            r = f"""{t * (self._level-1)}{self._name}:\n"""
        else:
            r = ''
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += f"""{t * (self._level)}{k}: {v} ({type(v).__name__})\n"""
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        cache_dir = kwargs.pop('cache_dir', None)
        force_download = kwargs.pop('force_download', False)
        resume_download = kwargs.pop('resume_download', False)
        proxies = kwargs.pop('proxies', None)
        local_files_only = kwargs.pop('local_files_only', False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = 'Can\'t load config for'
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print('loading configuration file from path')
        else:
            print('loading configuration file cache')

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load('dump.pt', map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"""{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"""
        " element-wise mismatch"
    )
    raise Exception('tensors are all good')


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '/' not in model_id
    if legacy_format:
        return f"""{endpoint}/{model_id}-{filename}"""
    else:
        return f"""{endpoint}/{model_id}/{filename}"""


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = 'python/{}'.format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join('{}/{}'.format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {'user-agent': ua}
    if resume_size > 0:
        headers['Range'] = 'bytes=%d-' % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get('Content-Length')
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit='B', unit_scale=True, total=total, initial=resume_size, desc='Downloading', )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get('ETag')
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + '.*')
                if not file.endswith('.json') and not file.endswith('.lock')
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        'Cannot find the requested files in the cached path and outgoing traffic has been'
                        ' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
                        ' to False.' )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + '.lock'
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + '.incomplete'

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, 'a+b') as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                '%s not found in cache or force_download set to True, downloading to %s', url, temp_file.name, )

            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, )

        os.replace(temp_file.name, cache_path)

        meta = {'url': url, 'etag': etag}
        meta_path = cache_path + '.json'
        with open(meta_path, 'w') as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode('utf-8')
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('utf-8')
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith('.h5'):
        filename += ".h5"
    return filename
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError('file {} not found'.format(url_or_filename))
    else:
        # Something unknown
        raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace('.', '-') + '-extracted'
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + '.lock'
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, 'r') as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError('Archive format of {} could not be identified'.format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=','):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split('\n')
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split('/')[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, 'rb') as stream:
        weights = pkl.load(stream)
    model = weights.pop('model')
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace('running_var', 'num_batches_tracked')
            new[k2] = zero
    return new


def get_demo_path():
    print(f"""{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb""")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"""could not connect to: {im}"""
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
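
# Illustrative usage of the chunk generator above (hypothetical `images` list):
#   for batch in chunk(images, batch=8):
#       run_inference(batch)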
| 430 |
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
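
# exchange_sort is a quadratic-time comparison sort: any later element smaller
# than numbers[i] is swapped forward immediately, so each pass fixes one slot,
# e.g. exchange_sort([5, 2, 4]) -> [2, 4, 5].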
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 585 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True, ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
    def test_hidden_states_output(self):
        pass

    @unittest.skip('TimmBackbone initialization is managed on the timm side')
    def test_initialization(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
    def test_model_common_attributes(self):
        pass

    @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_save_load(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tie_model_weights(self):
        pass

    @unittest.skip('model weights aren\'t tied in TimmBackbone.')
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
    def test_model_weights_reload_when_same_tied_weights(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
    def test_channels(self):
        pass

    @unittest.skip('TimmBackbone doesn\'t support output_attentions.')
    def test_attention_outputs(self):
        pass

    @unittest.skip('Safetensors is not supported by timm.')
    def test_can_use_safetensors(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 20 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False, ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
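
# Note: the encoder reuses T5 transformer blocks with a frozen, learned absolute
# position embedding; forward() returns the post-LayerNorm hidden states along
# with the unmodified input mask.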
| 264 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swiftformer'] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 20 | 0 |
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
| 588 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase: str = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = 'big_bird'

    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type='block_sparse', use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs, ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 20 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["""heb-eng"""])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("""opus-mt-he-en""", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 65 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Tuple = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
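
# Example invocation (illustrative checkpoint location; flags are defined by the parser below):
#   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt /path/or/url/to/checkpoint.pth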
if __name__ == "__main__":
_lowerCAmelCase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_lowerCAmelCase: Optional[int] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20 | 0 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1_000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
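
if __name__ == "__main__":
    # Not part of the original test module: lets these tests be run directly
    # with the standard unittest runner instead of pytest.
    unittest.main()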
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first node, second node, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root (component index) of the given node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-labels every node with its current root component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Compares two components by size and attaches the smaller one to the
        larger one to form a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Boruvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    # Placeholder hook for doctest-based checks.
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
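
    # Minimal usage sketch (not in the original file): build a small weighted
    # graph and let Boruvka's algorithm print the MST edges it selects.
    g = Graph(4)
    g.add_edge(0, 1, 1)
    g.add_edge(0, 2, 2)
    g.add_edge(2, 3, 3)
    g.add_edge(1, 3, 4)
    g.boruvka()  # total reported MST weight: 1 + 2 + 3 = 6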
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of `length` units can be tiled with
    black square units and oblong tiles measuring two, three or four units
    (Project Euler problem 117).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
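    # Worked example (not in the original file): the Project Euler 117 statement
    # gives exactly 15 tilings for a row of five units, which the DP reproduces.
    assert solution(5) == 15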
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
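
# Usage sketch (not part of the original metric file), mirroring the docstring
# example above; it assumes the `datasets` library resolves this metric under
# the name "rouge":
#
#   rouge = datasets.load_metric("rouge")
#   results = rouge.compute(predictions=["hello there"], references=["hello there"])
#   print(results["rouge1"].mid.fmeasure)  # 1.0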
from __future__ import annotations
END = "#"
class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
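    # Quick check (not in the original file): every stored word beginning with
    # "de" is completed, each terminated by the end-marker space:
    # ('depart ', 'detergent ', 'deer ', 'deal ')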
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError(
        #         'Invalid symbol('+repr(symbol)+')')
        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
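    # Non-interactive example (not in the original file), using the classic
    # Vigenere test vector: translate_message("LEMON", "Attack at dawn", "encrypt")
    # returns 'Lxfopv ef rnhr'.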
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
from __future__ import annotations


def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the mean absolute deviation of a non-empty list of numbers.

    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always
        # return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
from manim import *
class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
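
# Rendering sketch (not part of the original file): assuming this scene lives
# in a module named "stage_2.py" (a hypothetical filename), manim's CLI would
# render a low-quality preview with:
#   manim -pql stage_2.py Stage2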
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
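
# Sketch (not in this excerpt) of the device-buffer setup model_infer expects,
# using pycuda's allocation API; the binding indices 3 and 4 for the two output
# tensors are assumptions:
#
#   d_inputs = [cuda.mem_alloc(input_nbytes) for _ in range(3)]
#   h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
#   h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
#   d_output0 = cuda.mem_alloc(h_output0.nbytes)
#   d_output1 = cuda.mem_alloc(h_output1.nbytes)
#   stream = cuda.Stream()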
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
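# Illustration (not part of the original script): with a stride and return_overflowing_tokens=True,
# one long example fans out into several overlapping features. Hypothetical numbers:
#   out = tokenizer(question, long_context, truncation="only_second", max_length=384,
#                   stride=128, return_overflowing_tokens=True, return_offsets_mapping=True)
#   len(out["input_ids"]) > 1 and out["overflow_to_sample_mapping"] == [0, 0, ...]
# i.e. every feature points back to example 0, which is exactly what sample_mapping encodes above.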
eval_examples = raw_datasets["validation"]

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate page-locked output buffers (start logits and end logits)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None

    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    eval_time = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", eval_time, eval_time / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inferences = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
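# Illustrative aside (not part of the script above): the buffer/stream choreography in
# model_infer -- page-locked host arrays, device allocations, asynchronous copies on one
# CUDA stream, then a synchronize -- is the standard pycuda pattern. A minimal
# self-contained sketch, with made-up shapes and no TensorRT engine:
import numpy as np
import pycuda.autoinit  # noqa: F401 -- creates a CUDA context on import
import pycuda.driver as cuda

stream = cuda.Stream()
h_input = cuda.pagelocked_empty((1, 384), dtype=np.float32)  # pinned host buffer
d_input = cuda.mem_alloc(h_input.nbytes)  # raw device allocation
h_output = cuda.pagelocked_empty((1, 384), dtype=np.float32)
d_output = cuda.mem_alloc(h_output.nbytes)

h_input[:] = 1.0
cuda.memcpy_htod_async(d_input, h_input, stream)  # host -> device, queued on the stream
# ... kernel launches or a TRT execution context would be enqueued here ...
cuda.memcpy_dtoh_async(h_output, d_output, stream)  # device -> host
stream.synchronize()  # block until all queued work finishes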
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
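# Usage sketch for the API exercised above (the dataclass and its values are illustrative,
# not taken from the test file):
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class RunArgs:
    model_name: str = field(metadata={"help": "checkpoint to load"})
    lr: float = 5e-5
    fp16: bool = False


parser = HfArgumentParser(RunArgs)
(run_args,) = parser.parse_args_into_dataclasses(["--model_name", "bert-base-uncased", "--fp16", "true"])
assert run_args.lr == 5e-5 and run_args.fp16 is True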
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
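# Usage sketch outside the test harness; the checkpoint is the standard BLIP captioning
# model on the Hub, and the image URL is a placeholder:
import requests
from PIL import Image

from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.open(requests.get("https://example.com/cat.png", stream=True).raw)  # placeholder URL
inputs = processor(text="a photography of", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']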
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
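# Instantiation sketch; the numbers are illustrative, and the feature_size arithmetic
# follows directly from _number_of_features above:
from transformers import AutoformerConfig

config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
# feature_size = input_size * len(lags_sequence) + _number_of_features
#              = 1 * 7 + (0 embeddings + 0 dynamic + 2 time + 0 static + 2 loc/scale) = 11
print(config.d_model, config.feature_size)  # 64 11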
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
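# Usage sketch for the processor above (class name as reconstructed; shapes illustrative):
import numpy as np
from PIL import Image

processor = MobileNetV2ImageProcessor()
image = Image.fromarray(np.random.randint(0, 256, (300, 500, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): shortest edge -> 256, then 224x224 center crop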
"""simple docstring"""
__UpperCAmelCase = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
UpperCAmelCase__ : List[str] = Stack()
UpperCAmelCase__ : str = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__a ) )
elif i in operators:
# RULE 2
operator_stack.push(__a )
elif i == ")":
# RULE 4
UpperCAmelCase__ : Optional[Any] = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase__ : List[str] = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase__ : List[Any] = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase__ : int = operators[opr](__a , __a )
operand_stack.push(__a )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__UpperCAmelCase = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
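# Self-contained usage sketch: the stand-in module is built inline (instead of importing
# a real one) so the example runs as-is.
import os
import types

mymodule = types.ModuleType("mymodule")
mymodule.os = os  # simulates `import os` at mymodule's top level


def fake_join(*parts):
    return "fake://" + "/".join(parts)


with patch_submodule(mymodule, "os.path.join", fake_join):
    print(mymodule.os.path.join("a", "b"))  # fake://a/b
print(mymodule.os.path.join("a", "b"))  # original os.path.join restored on exit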