"""simple docstring"""
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record its character-to-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check that every submodule is registered in the main init of transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with extra parameters for movement pruning: pruning method, mask initialization and scale."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
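# Added usage sketch (hypothetical, not part of the original module): the class
# behaves like any PretrainedConfig subclass, so the pruning hyperparameters can
# be set directly at construction time.
# config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
# print(config.pruning_method)  # "topK"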
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise if `point` is not a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    One-liner version of `manhattan_distance`.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
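# Added note: _validate_point rejects non-numeric entries and empty inputs, so
# manhattan_distance([1, "one"], [2, 2]) raises TypeError, and
# manhattan_distance(None, [2, 2]) raises ValueError("Missing an input").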
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
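# Added usage sketch (hedged; assumes the standard transformers package layout):
# from transformers import MobileNetV1Config
# config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
# onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
# print(onnx_config.outputs)  # OrderedDict([("logits", {0: "batch"})])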
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
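# Added usage sketch (hedged; `features` below is hypothetical):
# from datasets import ClassLabel, Features, Image
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification(image_column="image", label_column="labels")
# task = task.align_with_features(features)  # label schema now carries the two class names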
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Disable gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    """Display a PIL image with matplotlib, hiding the axes."""
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
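# Added usage sketch (hedged; any torch.nn.Module works in place of the Linear):
# import torch.nn as nn
# encoder = nn.Linear(16, 16)
# freeze_module(encoder)  # every parameter now has requires_grad=False
# device = get_device()   # "cuda", "mps", or "cpu"
# print(get_timestamp())  # e.g. "14:03:59"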
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of the two strings, appending the leftover tail."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
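# Added examples: characters are interleaved index by index and the longer
# string's tail is appended once the shorter one runs out, so
# alternative_string_arrange("AB", "XYZ") returns "AXBYZ" and
# alternative_string_arrange("ABC", "") returns "ABC".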
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
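# Added usage sketch (hedged; the checkpoint name is the public CLIP release and
# `image` is a hypothetical PIL.Image):
# from transformers import CLIPProcessor
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# print(inputs.keys())  # input_ids, attention_mask, pixel_values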
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search; returns True if there is a path from source `s` to sink `t`."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Compute the max flow with Ford-Fulkerson, then return the saturated edges of the minimum cut."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
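# Added note: `mincut` runs Edmonds-Karp (BFS-based Ford-Fulkerson) until no
# augmenting path remains, then reports every edge that had positive capacity
# originally but zero residual capacity afterwards, i.e. the saturated edges
# crossing the minimum cut. For `test_graph` above this prints
# [(1, 3), (4, 3), (4, 5)].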
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)
        padded_inputs["input_values"] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
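# Added usage sketch (hedged): mono audio in, padded channel-first batch out.
# import numpy as np
# extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
# audio = np.zeros(24_000, dtype=np.float32)  # 1 second of mono audio
# batch = extractor(audio, sampling_rate=24_000, return_tensors="np")
# print(batch["input_values"].shape)  # (batch, channels, samples)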
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
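# Added usage sketch (hedged; the OnnxConfig constructor signature is assumed
# from the transformers ONNX export API):
# from transformers import CamembertConfig
# config = CamembertConfig()  # RoBERTa-style defaults
# onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
# print(onnx_config.inputs)  # dynamic batch/sequence axes for input_ids and attention_mask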
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors.

    >>> euclidean_distance((0, 0), (2, 2))
    2.8284271247461903
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Pure-Python implementation of `euclidean_distance`.

    >>> euclidean_distance_no_np((0, 0), (2, 2))
    2.8284271247461903
    """
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Time both implementations on a small example."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even-sized subtrees in `cuts`."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
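# Added note: dfs() returns the size of each subtree; whenever that size is
# even, the edge from `start` to its parent can be removed while keeping every
# resulting component even-sized, so `start` is appended to `cuts`. The root
# (the whole tree, which has even size here) is always counted, hence the
# final `len(cuts) - 1`.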
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_lowerCAmelCase : List[Any] = os.path.join(git_repo_path, '''src''', '''diffusers''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
        simple_backend = find_backend("    if not is_torch_available():" )
        self.assertEqual(simple_backend ,"torch" )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(double_backend ,"torch_and_transformers" )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(triple_backend ,"torch_and_transformers_and_onnx" )
    def test_read_init( self: Tuple ):
'''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" ,objects )
        self.assertIn("torch_and_transformers" ,objects )
        self.assertIn("flax_and_transformers" ,objects )
        self.assertIn("torch_and_transformers_and_onnx" ,objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
    def test_create_dummy_object( self: Union[str, Any] ):
'''simple docstring'''
        dummy_constant = create_dummy_object("CONSTANT" ,"'torch'" )
        self.assertEqual(dummy_constant ,"\nCONSTANT = None\n" )
        dummy_function = create_dummy_object("function" ,"'torch'" )
        self.assertEqual(
            dummy_function ,"\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass" ,"'torch'" )
        self.assertEqual(dummy_class ,expected_dummy_class )
    def test_create_dummy_files( self: List[str] ):
'''simple docstring'''
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] ,expected_dummy_pytorch_file ) | 351 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '''__DUMMY_TRANSFORMERS_USER__'''
CI_HUB_USER_FULL_NAME = '''Dummy User'''
CI_HUB_USER_TOKEN = '''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'''
CI_HUB_ENDPOINT = '''https://hub-ci.huggingface.co'''
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
CI_HUB_TOKEN_PATH = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr("datasets.config.HF_ENDPOINT" , CI_HUB_ENDPOINT )
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path(monkeypatch):
    '''simple docstring'''
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config , ci_hub_token_path):
    '''simple docstring'''
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session" )
def hf_api():
    '''simple docstring'''
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope="session" )
def hf_token(hf_api: HfApi):
    '''simple docstring'''
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo(hf_api):
    '''simple docstring'''
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type="dataset" )
    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
    '''simple docstring'''
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
@pytest.fixture(scope="session" )
def hf_private_dataset_repo_txt_data_(hf_api: HfApi , hf_token , text_file):
    '''simple docstring'''
    repo_name = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="dataset" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo="data/text_data.txt" , repo_id=repo_id , repo_type="dataset" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="dataset" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url):
    '''simple docstring'''
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi , hf_token , zip_csv_with_dir_path):
    '''simple docstring'''
    repo_name = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="dataset" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_csv_with_dir_path ) , path_in_repo="data.zip" , repo_id=repo_id , repo_type="dataset" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="dataset" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url):
    '''simple docstring'''
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi , hf_token , zip_image_path):
    '''simple docstring'''
    repo_name = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="dataset" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo="data.zip" , repo_id=repo_id , repo_type="dataset" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="dataset" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url):
    '''simple docstring'''
    return hf_private_dataset_repo_zipped_img_data_ | 340 | 0 |
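# Usage sketch (added; hypothetical test, not part of the original fixtures):
#
#   def test_private_txt_repo(hf_private_dataset_repo_txt_data, hf_token):
#       # the fixture yields a repo id such as
#       # "__DUMMY_TRANSFORMERS_USER__/repo_txt_data-<timestamp>"
#       assert hf_private_dataset_repo_txt_data.startswith(CI_HUB_USER)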
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class A_ ( unittest.TestCase ):
    def setUp( self: Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding="utf-8" ,check=__lowerCAmelCase ,)
assert hasattr(self ,"env" )
    def create_estimator( self: Union[str, Any] ,instance_count: int ):
        '''simple docstring'''
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=instance_count ,instance_type=self.instance_type ,debugger_hook_config=False ,hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            } ,metric_definitions=self.env.metric_definitions ,distribution=distribution ,py_version="py36" ,)
    def save_results_as_csv( self: List[str] ,job_name: str ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
    def test_script( self: Optional[Any] ,instance_count: Optional[int] ):
        '''simple docstring'''
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" ,999_999 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,"w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} ,__lowerCAmelCase ) | 352 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
    def __init__( self: List[Any] ,controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        '''simple docstring'''
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self: Union[str, Any] ,sample: torch.FloatTensor ,timestep: Union[torch.Tensor, float, int] ,encoder_hidden_states: torch.Tensor ,controlnet_cond: List[torch.tensor] ,conditioning_scale: List[float] ,class_labels: Optional[torch.Tensor] = None ,timestep_cond: Optional[torch.Tensor] = None ,attention_mask: Optional[torch.Tensor] = None ,cross_attention_kwargs: Optional[Dict[str, Any]] = None ,guess_mode: bool = False ,return_dict: bool = True ,):
        '''simple docstring'''
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond ,conditioning_scale ,self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample ,timestep ,encoder_hidden_states ,image ,scale ,class_labels ,timestep_cond ,attention_mask ,cross_attention_kwargs ,guess_mode ,return_dict ,)
# merge samples
if i == 0:
_lowerCamelCase, _lowerCamelCase : Optional[Any] = down_samples, mid_sample
else:
_lowerCamelCase : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__lowerCAmelCase ,__lowerCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self: Union[str, Any] ,save_directory: Union[str, os.PathLike] ,is_main_process: bool = True ,save_function: Callable = None ,safe_serialization: bool = False ,variant: Optional[str] = None ,):
        '''simple docstring'''
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save ,is_main_process=is_main_process ,save_function=save_function ,safe_serialization=safe_serialization ,variant=variant ,)
            idx += 1
            model_path_to_save = model_path_to_save + F"""_{idx}"""
@classmethod
    def from_pretrained( cls: Any ,pretrained_model_path: Optional[Union[str, os.PathLike]] ,**kwargs: int ):
        '''simple docstring'''
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load ,**kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + F"""_{idx}"""
        logger.info(F"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                F"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(controlnets ) | 340 | 0 |
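# Usage sketch (added; the checkpoint paths are illustrative, not from the
# original file): two ControlNets combined, saved, and restored. Saving writes
# the first net to `save_directory` and the rest to `save_directory_1`, ... as
# described in the comments above.
#
#   controlnet_a = ControlNetModel.from_pretrained("path/to/controlnet_a")
#   controlnet_b = ControlNetModel.from_pretrained("path/to/controlnet_b")
#   multi = MultiControlNetModel([controlnet_a, controlnet_b])
#   multi.save_pretrained("./my_multi/controlnet")
#   restored = MultiControlNetModel.from_pretrained("./my_multi/controlnet")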
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config( PretrainedConfig ):
    model_type = 'mobilenet_v1'
    def __init__( self: Tuple ,num_channels: int=3 ,image_size: int=224 ,depth_multiplier: float=1.0 ,min_depth: int=8 ,hidden_act: str="relu6" ,tf_padding: bool=True ,classifier_dropout_prob: float=0.9_99 ,initializer_range: float=0.02 ,layer_norm_eps: float=0.0_01 ,**kwargs: str ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self: Optional[int] ):
'''simple docstring'''
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
    def outputs( self: Optional[Any] ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
    def atol_for_validation( self: Any ):
'''simple docstring'''
return 1e-4 | 353 |
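# Minimal sketch (added; not part of the original file): instantiating the
# config and inspecting the ONNX specs defined above.
if __name__ == "__main__":
    _config = MobileNetV1Config()
    _onnx_config = MobileNetV1OnnxConfig(_config)
    print(_onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch'})])
    print(_onnx_config.atol_for_validation)  # 0.0001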
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict , encoder_only=False ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head" ):
            key = "segformer.encoder." + key
        if key.startswith("backbone" ):
            key = key.replace("backbone" , "segformer.encoder" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(idx )-1}""" )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
            key = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(idx )-1}""" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(F"""block{idx}""" , F"""block.{int(idx )-1}""" )
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(idx )-1}""" )
        if key.startswith("head" ):
            key = key.replace("head" , "classifier" )
        new_state_dict[key] = value
    return new_state_dict
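def _rename_keys_demo() -> None:
    # Tiny smoke test (added; the key below is illustrative): a backbone
    # patch-embedding key should be re-rooted under the segformer encoder.
    sample = OrderedDict({"backbone.patch_embed1.proj.weight": None})
    assert "segformer.encoder.patch_embeddings.0.proj.weight" in rename_keys(sample)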
def read_in_k_v( state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            kv_bias = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            state_dict[F"""segformer.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F"""segformer.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F"""segformer.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F"""segformer.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer." ) : len("segformer." ) + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(F"""Model {model_name} not supported""" )
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(F"""Model {model_name} not supported""" )
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(F"""Size {size} not supported""" )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
    logger.info(F"""Converting model {model_name}...""" )
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    else:
        state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )["state_dict"]
    # rename keys
    state_dict = rename_keys(state_dict , encoder_only=encoder_only )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config )
    else:
        model = SegformerForSemanticSegmentation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
    else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.id2label[predicted_class_idx] )
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 340 | 0 |
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def lowerCamelCase_( ) -> List[Any]:
'''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase="eval" ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Tuple = os.path.join(_lowerCamelCase , F"""{split}_results.json""" )
if os.path.exists(_lowerCamelCase ):
with open(_lowerCamelCase , "r" ) as f:
return json.load(_lowerCamelCase )
raise ValueError(F"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests( TestCasePlus ):
    def test_run_glue( self: Optional[Any] ):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys ,"argv" ,testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
@slow
    def test_run_clm( self: str ):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys ,"argv" ,testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
self.assertLess(result["eval_perplexity"] ,100 )
@slow
    def test_run_summarization( self: Optional[int] ):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys ,"argv" ,testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir ,split="test" )
self.assertGreaterEqual(result["test_rouge1"] ,10 )
self.assertGreaterEqual(result["test_rouge2"] ,2 )
self.assertGreaterEqual(result["test_rougeL"] ,7 )
self.assertGreaterEqual(result["test_rougeLsum"] ,7 )
@slow
    def test_run_mlm( self: Dict ):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys ,"argv" ,testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertLess(result["eval_perplexity"] ,42 )
@slow
    def test_run_t5_mlm( self: int ):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys ,"argv" ,testargs ):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] ,0.42 )
@slow
    def test_run_ner( self: Optional[int] ):
        '''simple docstring'''
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys ,"argv" ,testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
self.assertGreaterEqual(result["eval_f1"] ,0.3 )
@slow
    def test_run_qa( self: Tuple ):
        '''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys ,"argv" ,testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result["eval_f1"] ,30 )
self.assertGreaterEqual(result["eval_exact"] ,30 ) | 354 |
"""simple docstring"""
cache : dict[tuple[int, int, int], int] = {}
def _calculate( days , absent , late ) -> int:
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution( days = 30 ) -> int:
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
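def _sanity_check() -> None:
    # Hand-checkable example (added): for 4 days there are 43 valid strings --
    # 3**4 = 81 in total, minus 33 containing two or more absences, minus 5
    # containing three consecutive late days.
    assert solution(4) == 43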
if __name__ == "__main__":
print(solution()) | 340 | 0 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ):
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
        for checkpoint in checkpoint_names:
            logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"""=> File names {file_names}""" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"""=> removing {file_name}""" )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download) | 355 |
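# Example invocation (added; the script filename, tokenizer name and paths are
# illustrative):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers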
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital( number ) -> bool:
    '''simple docstring'''
    base_num = str(number )
    return len(base_num ) == 9 and set(base_num ) == set("123456789" )
def solution() -> int | None:
    '''simple docstring'''
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
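# Worked example (added): the first loop finds 9327 * 100002 = 932718654, the
# concatenation of 9327 and 2 * 9327 = 18654, which uses each digit 1-9 once.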
if __name__ == "__main__":
print(f'''{solution() = }''') | 340 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    _backends = ['torch', 'torchsde']
def __init__( self: List[str] ,*__lowerCAmelCase: Any ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
requires_backends(self ,["torch", "torchsde"] )
@classmethod
def _lowercase ( cls: List[Any] ,*__lowerCAmelCase: int ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["torch", "torchsde"] )
@classmethod
def _lowercase ( cls: Optional[Any] ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: Any ):
'''simple docstring'''
requires_backends(cls ,["torch", "torchsde"] ) | 356 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
_lowerCAmelCase : List[str] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'
    def __init__( self: List[Any] ,image_processor=None ,tokenizer=None ,**kwargs: Optional[Any] ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." ,FutureWarning ,)
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased" )
        super().__init__(image_processor ,tokenizer )
    def __call__( self: Optional[int] ,text=None ,images=None ,return_tensors=None ,**kwargs: Tuple ):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )
        if images is not None:
            inputs = self.image_processor(images ,return_tensors=return_tensors ,**kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text ,return_tensors=return_tensors ,**kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode( self: int ,sequences: Dict ):
        '''simple docstring'''
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs, char_scores = self._decode_helper(char_preds ,"char" )
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds ,"bpe" )
        wp_strs, wp_scores = self._decode_helper(wp_preds ,"wp" )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
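    # Usage sketch (added; the model name and shapes are illustrative):
    # `batch_decode` expects the (char, bpe, wp) logits triple produced by an
    # MGP-STR recognition model:
    #
    #   outputs = model(pixel_values)
    #   decoded = processor.batch_decode(outputs.logits)
    #   decoded["generated_text"]  # best-scoring string per image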
    def _decode_helper( self: List[str] ,pred_logits: Dict ,format: List[Any] ):
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(F"""Format {format} is not supported.""" )
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _, preds_index = pred_logits.topk(1 ,dim=-1 ,largest=True ,sorted=True )
        preds_index = preds_index.view(-1 ,batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits ,dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self: Tuple ,sequences: Tuple ):
        '''simple docstring'''
        decode_strs = [seq.replace(" " ,"" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self: List[str] ,sequences: List[str] ):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self: Tuple ,sequences: Optional[int] ):
        '''simple docstring'''
        decode_strs = [seq.replace(" " ,"" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs | 340 | 0 |
"""simple docstring"""
import os
from math import log10
def solution( data_file = "base_exp.txt" ) -> int:
    '''simple docstring'''
    largest : float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split("," ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
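def _log_compare_demo() -> None:
    # Worked example (added): 3**7 = 2187 exceeds 2**11 = 2048, and comparing
    # exponent * log10(base) agrees: 7 * 0.477 ~ 3.34 > 11 * 0.301 ~ 3.31.
    assert 7 * log10(3) > 11 * log10(2)
    assert 3**7 > 2**11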
if __name__ == "__main__":
print(solution()) | 357 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = 'dummy_data'
    datasets_scripts_dir = 'datasets'
    is_streaming = False
    def __init__( self: List[str] ,dataset_name: str ,config: str ,version: Union[Version, str] ,cache_dir: Optional[str] = None ,use_local_dummy_data: bool = False ,load_existing_dummy_data: bool = True ,download_callbacks: Optional[List[Callable]] = None ,):
        '''simple docstring'''
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
@property
    def dummy_file( self: str ):
        '''simple docstring'''
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
@property
    def dummy_data_folder( self: str ):
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" ,self.config.name ,self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" ,self.version_name )
@property
    def dummy_zip_file( self: Optional[Any] ):
'''simple docstring'''
return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase : Optional[int] = cached_path(
__lowerCAmelCase ,cache_dir=self.cache_dir ,extract_compressed_file=__lowerCAmelCase ,force_extract=__lowerCAmelCase )
return os.path.join(__lowerCAmelCase ,self.dummy_file_name )
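    # Behavior note (annotation; not in the original file): the dummy_data.zip is
    # resolved either from the local checkout or from the GitHub path, extracted
    # via cached_path, and the path to the requested file inside the extracted
    # archive is returned.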
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
@property
def _lowercase ( self: List[str] ):
'''simple docstring'''
if self._bucket_url is None:
_lowerCamelCase : List[str] = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
return self._bucket_url
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: str ,*__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.create_dummy_data_dict(__lowerCAmelCase ,__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,(list, tuple) ):
return self.create_dummy_data_list(__lowerCAmelCase ,__lowerCAmelCase )
else:
return self.create_dummy_data_single(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ):
'''simple docstring'''
return self.download_and_extract(__lowerCAmelCase )
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Optional[int] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
return path
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return {}
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
for single_url in single_urls:
download_callback(__lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = single_urls
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Dict = [os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) ) for x in single_urls]
else:
_lowerCamelCase : Union[str, Any] = single_urls
_lowerCamelCase : List[str] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(Path(__lowerCAmelCase ).name ) )
_lowerCamelCase : List[Any] = value
# make sure that values are unique
if all(isinstance(__lowerCAmelCase ,__lowerCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,__lowerCAmelCase ) ) for url in data_url )
_lowerCamelCase : Optional[Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase : Tuple = [data_url[0]] * len(__lowerCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__lowerCAmelCase )
return dummy_data_list
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__lowerCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : Optional[int] = os.path.join(__lowerCAmelCase ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__lowerCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
def _iter_archive_members(__lowerCAmelCase: Any ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase : Tuple = Path(self.dummy_file ).parent
_lowerCamelCase : str = path.relative_to(__lowerCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = Path(__lowerCAmelCase )
_lowerCamelCase : int = _iter_archive_members(__lowerCAmelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__lowerCAmelCase ).as_posix(), file_path.open("rb" )
def _lowercase ( self: str ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [paths]
for path in paths:
if os.path.isfile(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__lowerCAmelCase ):
if os.path.basename(__lowerCAmelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(__lowerCAmelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(__lowerCAmelCase ,__lowerCAmelCase ) | 340 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=_a ):
lowerCAmelCase__ = ['note_seq']
def __init__( self: Tuple ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
requires_backends(self ,["note_seq"] )
@classmethod
def _lowercase ( cls: Dict ,*__lowerCAmelCase: int ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["note_seq"] )
@classmethod
def _lowercase ( cls: List[Any] ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
requires_backends(cls ,["note_seq"] ) | 358 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
_lowerCamelCase : int = precision
_lowerCamelCase : Dict = ceil(precision / 14 )
_lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt()
_lowerCamelCase : int = 1
_lowerCamelCase : Optional[int] = 13591409
_lowerCamelCase : int = Decimal(_lowerCamelCase )
for k in range(1 , _lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
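# Reference for the loop above (annotation; not in the original file): this is the
# Chudnovsky series,
#   1/pi = (12 / 640320**1.5) * sum_k (6k)! * (13591409 + 545140134*k)
#          / ((3k)! * (k!)**3 * (-640320**3)**k),
# equivalently pi = 426880 * sqrt(10005) / partial_sum here, since
# 640320**3 = 262537412640768000 and 426880 * sqrt(10005) = 640320**1.5 / 12.
# Each term adds roughly 14 correct digits, hence ceil(precision / 14) iterations.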
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = 50
print(f'''The first {n} digits of pi is: {pi(n)}''') | 340 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase : int = str(_lowerCamelCase )
return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" )
def lowerCamelCase_( ) -> int | None:
'''simple docstring'''
for base_num in range(9999 , 4999 , -1 ):
_lowerCamelCase : Union[str, Any] = 100002 * base_num
if is_9_pandigital(_lowerCamelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
_lowerCamelCase : Tuple = 1002003 * base_num
if is_9_pandigital(_lowerCamelCase ):
return candidate
return None
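# Why these multipliers (annotation; not in the original file): for a 4-digit n,
# concatenating n with 2n (a 5-digit number) equals n * 10**5 + 2n = 100002 * n,
# and for a 3-digit n, concatenating n, 2n, 3n equals 1002003 * n; both products
# have exactly 9 digits, so scanning each family downward finds the largest
# 1-9 pandigital concatenated product.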
if __name__ == "__main__":
print(f'''{solution() = }''') | 359 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A_ ( _a ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowerCamelCase : str = []
for i in range(_lowerCamelCase ):
_lowerCamelCase : Any = i / num_diffusion_timesteps
_lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) )
return torch.tensor(_lowerCamelCase , dtype=torch.floataa )
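# Schedule math (annotation; not in the original file): with the "cosine"
# transform, alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 and each
# beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta), which
# yields a monotonically increasing schedule clipped at max_beta near the end
# of the diffusion process.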
class A_ ( _a , _a ):
@register_to_config
def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 1.0 - self.betas
_lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 )
_lowerCamelCase : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_lowerCamelCase : Tuple = 1.0
# setable values
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() )
_lowerCamelCase : List[str] = variance_type
def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ):
'''simple docstring'''
return sample
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ):
'''simple docstring'''
_lowerCamelCase : str = num_inference_steps
_lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ):
'''simple docstring'''
if prev_timestep is None:
_lowerCamelCase : List[str] = t - 1
_lowerCamelCase : Optional[int] = self.alphas_cumprod[t]
_lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : List[Any] = self.betas[t]
else:
_lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_lowerCamelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) )
_lowerCamelCase : str = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_lowerCamelCase : str = variance.log()
_lowerCamelCase : str = beta.log()
_lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2
_lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
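    # Variance math (annotation; not in the original file): the quantity above is
    # the DDPM posterior variance
    #   var_t = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t),
    # and "learned_range" interpolates between log(var_t) and log(beta_t) using
    # the model-predicted fraction.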
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : str = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 )
else:
_lowerCamelCase : List[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
_lowerCamelCase : List[Any] = t - 1
_lowerCamelCase : Dict = self.alphas_cumprod[t]
_lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Any = self.betas[t]
_lowerCamelCase : str = self.alphas[t]
else:
_lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev
_lowerCamelCase : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : List[Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Any = torch.clamp(
__lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCamelCase : Union[str, Any] = 0
if t > 0:
_lowerCamelCase : Dict = randn_tensor(
model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device )
_lowerCamelCase : Any = self._get_variance(
__lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,)
if self.variance_type == "fixed_small_log":
_lowerCamelCase : Optional[Any] = variance
elif self.variance_type == "learned_range":
_lowerCamelCase : Optional[int] = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
_lowerCamelCase : Dict = variance * variance_noise
_lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase )
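    # Posterior-mean sketch for step() above (annotation; not in the original
    # file), following formula (7) of the DDPM paper:
    #   mu_t = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
    #        + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t,
    # with noise of the _get_variance magnitude added whenever t > 0.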
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,):
'''simple docstring'''
_lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
_lowerCamelCase : Any = timesteps.to(original_samples.device )
_lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5
_lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples | 340 | 0 |
"""simple docstring"""
import math
def lowerCamelCase_( _lowerCamelCase ) -> bool:
    '''simple docstring'''
    # Fast floating-point check; sqrt rounding can misclassify very large
    # integers, so prefer the exact binary-search variant below when n is huge.
    return math.sqrt(_lowerCamelCase ) * math.sqrt(_lowerCamelCase ) == num
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase : str = 0
_lowerCamelCase : List[str] = n
while left <= right:
_lowerCamelCase : Optional[int] = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_lowerCamelCase : Tuple = mid - 1
else:
_lowerCamelCase : Optional[Any] = mid + 1
return False
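# Note (annotation; not in the original file): the binary search above uses only
# integer arithmetic, so unlike the sqrt-based check it stays exact for very
# large inputs, e.g. n = (10**20 + 1) ** 2, where float sqrt loses precision.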
if __name__ == "__main__":
import doctest
doctest.testmod() | 360 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCAmelCase : str = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
for attribute in key.split("." ):
_lowerCamelCase : Tuple = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCamelCase : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCamelCase : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_lowerCamelCase : Tuple = value
elif weight_type == "weight_g":
_lowerCamelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCamelCase : List[Any] = value
elif weight_type == "bias":
_lowerCamelCase : str = value
elif weight_type == "running_mean":
_lowerCamelCase : Optional[int] = value
elif weight_type == "running_var":
_lowerCamelCase : Optional[Any] = value
elif weight_type == "num_batches_tracked":
_lowerCamelCase : int = value
elif weight_type == "inv_freq":
_lowerCamelCase : List[str] = value
else:
_lowerCamelCase : Optional[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Dict = []
_lowerCamelCase : Optional[Any] = fairseq_model.state_dict()
_lowerCamelCase : List[Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCamelCase : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCamelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCamelCase : Dict = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCamelCase : int = True
if "*" in mapped_key:
_lowerCamelCase : Tuple = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCamelCase : int = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCamelCase : int = None
elif "pos_bias_v" in name:
_lowerCamelCase : Any = None
elif "weight_g" in name:
_lowerCamelCase : Any = "weight_g"
elif "weight_v" in name:
_lowerCamelCase : Any = "weight_v"
elif "bias" in name:
_lowerCamelCase : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCamelCase : Dict = "weight"
elif "running_mean" in name:
_lowerCamelCase : str = "running_mean"
elif "inv_freq" in name:
_lowerCamelCase : List[Any] = "inv_freq"
elif "running_var" in name:
_lowerCamelCase : Tuple = "running_var"
elif "num_batches_tracked" in name:
_lowerCamelCase : str = "num_batches_tracked"
else:
_lowerCamelCase : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = full_name.split("conv_layers." )[-1]
_lowerCamelCase : List[Any] = name.split("." )
_lowerCamelCase : Union[str, Any] = int(items[0] )
_lowerCamelCase : List[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
_lowerCamelCase : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
_lowerCamelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
_lowerCamelCase : Dict = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
_lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ) -> Dict:
'''simple docstring'''
if config_path is not None:
_lowerCamelCase : Union[str, Any] = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCamelCase : Dict = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCamelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCamelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCamelCase : Optional[int] = target_dict.pad_index
_lowerCamelCase : Dict = target_dict.bos_index
_lowerCamelCase : Optional[Any] = target_dict.eos_index
_lowerCamelCase : str = len(target_dict.symbols )
_lowerCamelCase : int = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCamelCase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : Optional[int] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCamelCase : Tuple = True if config.feat_extract_norm == "layer" else False
_lowerCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCamelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCamelCase : Any = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCamelCase : List[Any] = argparse.Namespace(task="audio_pretraining" )
_lowerCamelCase : Optional[Any] = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCamelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 340 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class A_ ( _a ):
    lowerCAmelCase__ = 42
lowerCAmelCase__ = None
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=0.9_9_9 , _lowerCamelCase="cosine" , ) -> List[str]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_lowerCamelCase : str = []
for i in range(_lowerCamelCase ):
_lowerCamelCase : Any = i / num_diffusion_timesteps
_lowerCamelCase : Optional[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) )
return torch.tensor(_lowerCamelCase , dtype=torch.floataa )
class A_ ( _a , _a ):
@register_to_config
def __init__( self: str ,__lowerCAmelCase: int = 1_000 ,__lowerCAmelCase: str = "fixed_small_log" ,__lowerCAmelCase: bool = True ,__lowerCAmelCase: Optional[float] = 1.0 ,__lowerCAmelCase: str = "epsilon" ,__lowerCAmelCase: str = "squaredcos_cap_v2" ,):
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 1.0 - self.betas
_lowerCamelCase : Dict = torch.cumprod(self.alphas ,dim=0 )
_lowerCamelCase : int = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_lowerCamelCase : Tuple = 1.0
# setable values
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = torch.from_numpy(np.arange(0 ,__lowerCAmelCase )[::-1].copy() )
_lowerCamelCase : List[str] = variance_type
def _lowercase ( self: Any ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ):
'''simple docstring'''
return sample
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, torch.device] = None ):
'''simple docstring'''
_lowerCamelCase : str = num_inference_steps
_lowerCamelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_lowerCamelCase : Union[str, Any] = (np.arange(0 ,__lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_lowerCamelCase : int = torch.from_numpy(__lowerCAmelCase ).to(__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: str=None ):
'''simple docstring'''
if prev_timestep is None:
_lowerCamelCase : List[str] = t - 1
_lowerCamelCase : Optional[int] = self.alphas_cumprod[t]
_lowerCamelCase : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : str = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : List[Any] = self.betas[t]
else:
_lowerCamelCase : str = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : int = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_lowerCamelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_lowerCamelCase : Dict = torch.log(torch.clamp(__lowerCAmelCase ,min=1e-20 ) )
_lowerCamelCase : str = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_lowerCamelCase : str = variance.log()
_lowerCamelCase : str = beta.log()
_lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2
_lowerCamelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log
return variance
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ,__lowerCAmelCase: Tuple=None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : str = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            _lowerCamelCase, _lowerCamelCase : int = torch.split(__lowerCAmelCase ,sample.shape[1] ,dim=1 )
else:
_lowerCamelCase : List[Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
_lowerCamelCase : List[Any] = t - 1
_lowerCamelCase : Dict = self.alphas_cumprod[t]
_lowerCamelCase : int = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Dict = 1 - alpha_prod_t
_lowerCamelCase : List[str] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Any = self.betas[t]
_lowerCamelCase : str = self.alphas[t]
else:
_lowerCamelCase : Any = 1 - alpha_prod_t / alpha_prod_t_prev
_lowerCamelCase : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : List[Any] = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : Any = torch.clamp(
__lowerCAmelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_lowerCamelCase : Optional[int] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCamelCase : Union[str, Any] = 0
if t > 0:
_lowerCamelCase : Dict = randn_tensor(
model_output.shape ,dtype=model_output.dtype ,generator=__lowerCAmelCase ,device=model_output.device )
_lowerCamelCase : Any = self._get_variance(
__lowerCAmelCase ,predicted_variance=__lowerCAmelCase ,prev_timestep=__lowerCAmelCase ,)
if self.variance_type == "fixed_small_log":
_lowerCamelCase : Optional[Any] = variance
elif self.variance_type == "learned_range":
_lowerCamelCase : Optional[int] = (0.5 * variance).exp()
else:
raise ValueError(
F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
" for the UnCLIPScheduler." )
_lowerCamelCase : Dict = variance * variance_noise
_lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__lowerCAmelCase ,pred_original_sample=__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.IntTensor ,):
'''simple docstring'''
_lowerCamelCase : int = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
_lowerCamelCase : Any = timesteps.to(original_samples.device )
_lowerCamelCase : List[Any] = alphas_cumprod[timesteps] ** 0.5
_lowerCamelCase : List[Any] = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : int = sqrt_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Union[str, Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCamelCase : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : Union[str, Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 361 |
"""simple docstring"""
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_lowerCamelCase , _lowerCamelCase ) ) )
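# Formula note (annotation; not in the original file): for points p, q in R^n the
# Manhattan (L1) distance is sum_i |p_i - q_i|, e.g. d((1, 1), (2, 2)) = 2.0; the
# second implementation further below computes the same quantity with a
# different comprehension.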
def lowerCamelCase_( _lowerCamelCase ) -> None:
'''simple docstring'''
if point:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for item in point:
if not isinstance(_lowerCamelCase , (int, float) ):
_lowerCamelCase : Dict = (
"Expected a list of numbers as input, found "
F"""{type(_lowerCamelCase ).__name__}"""
)
raise TypeError(_lowerCamelCase )
else:
_lowerCamelCase : Optional[int] = F"""Expected a list of numbers as input, found {type(_lowerCamelCase ).__name__}"""
raise TypeError(_lowerCamelCase )
else:
raise ValueError("Missing an input" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> float:
'''simple docstring'''
_validate_point(_lowerCamelCase )
_validate_point(_lowerCamelCase )
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_lowerCamelCase , _lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 340 | 0 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> tuple[complex, complex]:
'''simple docstring'''
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
_lowerCamelCase : Union[str, Any] = b * b - 4 * a * c
_lowerCamelCase : Optional[int] = (-b + sqrt(_lowerCamelCase )) / (2 * a)
_lowerCamelCase : List[Any] = (-b - sqrt(_lowerCamelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
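# Math note (annotation; not in the original file): the returned values are
# (-b +/- sqrt(b**2 - 4*a*c)) / (2*a); cmath.sqrt keeps complex conjugate roots
# when the discriminant is negative, while purely real roots are unwrapped to
# plain floats by the imag checks above.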
def lowerCamelCase_( ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : List[str] = quadratic_roots(a=5 , b=6 , c=1 )
print(F"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
| 362 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = np.inf
def set_batch_size(_lowerCamelCase ) -> None:
nonlocal batch_size
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Optional[int] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and feature.dtype == "binary":
_lowerCamelCase : List[str] = min(_lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(_lowerCamelCase , _lowerCamelCase )
return None if batch_size is np.inf else batch_size
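# Intent of the helper above (annotation; not in the original file): Parquet row
# groups are the unit of random access, so features with large cells (images,
# audio, raw binary) are written with smaller batches to keep row groups at an
# addressable size; plain features fall through to the default batch size.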
class A_ ( _a ):
def __init__( self: Optional[int] ,__lowerCAmelCase: NestedDataStructureLike[PathLike] ,__lowerCAmelCase: Optional[NamedSplit] = None ,__lowerCAmelCase: Optional[Features] = None ,__lowerCAmelCase: str = None ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: bool = False ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: int ,):
'''simple docstring'''
super().__init__(
__lowerCAmelCase ,split=__lowerCAmelCase ,features=__lowerCAmelCase ,cache_dir=__lowerCAmelCase ,keep_in_memory=__lowerCAmelCase ,streaming=__lowerCAmelCase ,num_proc=__lowerCAmelCase ,**__lowerCAmelCase ,)
_lowerCamelCase : Tuple = path_or_paths if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) else {self.split: path_or_paths}
_lowerCamelCase : Any = _PACKAGED_DATASETS_MODULES["parquet"][1]
_lowerCamelCase : int = Parquet(
cache_dir=__lowerCAmelCase ,data_files=__lowerCAmelCase ,features=__lowerCAmelCase ,hash=__lowerCAmelCase ,**__lowerCAmelCase ,)
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
if self.streaming:
_lowerCamelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : str = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase ,download_mode=__lowerCAmelCase ,verification_mode=__lowerCAmelCase ,base_path=__lowerCAmelCase ,num_proc=self.num_proc ,)
_lowerCamelCase : Any = self.builder.as_dataset(
split=self.split ,verification_mode=__lowerCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class A_ :
def __init__( self: str ,__lowerCAmelCase: Dataset ,__lowerCAmelCase: Union[PathLike, BinaryIO] ,__lowerCAmelCase: Optional[int] = None ,**__lowerCAmelCase: List[Any] ,):
'''simple docstring'''
_lowerCamelCase : Any = dataset
_lowerCamelCase : Any = path_or_buf
_lowerCamelCase : Any = batch_size or get_writer_batch_size(dataset.features )
_lowerCamelCase : List[str] = parquet_writer_kwargs
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with open(self.path_or_buf ,"wb+" ) as buffer:
_lowerCamelCase : str = self._write(file_obj=__lowerCAmelCase ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
else:
_lowerCamelCase : Optional[int] = self._write(file_obj=self.path_or_buf ,batch_size=__lowerCAmelCase ,**self.parquet_writer_kwargs )
return written
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: BinaryIO ,__lowerCAmelCase: int ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = parquet_writer_kwargs.pop("path_or_buf" ,__lowerCAmelCase )
_lowerCamelCase : List[str] = self.dataset.features.arrow_schema
_lowerCamelCase : str = pq.ParquetWriter(__lowerCAmelCase ,schema=__lowerCAmelCase ,**__lowerCAmelCase )
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,__lowerCAmelCase ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating parquet from Arrow format" ,):
_lowerCamelCase : List[str] = query_table(
table=self.dataset._data ,key=slice(__lowerCAmelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
writer.write_table(__lowerCAmelCase )
written += batch.nbytes
writer.close()
return written | 340 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCamelCase_( _lowerCamelCase ) -> Any:
'''simple docstring'''
for param in module.parameters():
_lowerCamelCase : Optional[int] = False
def lowerCamelCase_( ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_lowerCamelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowerCamelCase_( _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : Dict = plt.imshow(_lowerCamelCase )
fig.axes.get_xaxis().set_visible(_lowerCamelCase )
fig.axes.get_yaxis().set_visible(_lowerCamelCase )
plt.show()
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Tuple = datetime.now()
_lowerCamelCase : Tuple = current_time.strftime("%H:%M:%S" )
return timestamp | 363 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowerCamelCase_( _lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Optional[int] = tokenizer(example["content"] , truncation=_lowerCamelCase )["input_ids"]
_lowerCamelCase : Dict = len(example["content"] ) / len(output["input_ids"] )
return output
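# Note on the values above (annotation; not in the original file): the second
# assignment stores characters per produced token, a rough compression metric;
# higher values mean the tokenizer packs more source text into each id.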
_lowerCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments)
_lowerCAmelCase : Optional[int] = parser.parse_args()
if args.num_workers is None:
_lowerCAmelCase : Any = multiprocessing.cpu_count()
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_lowerCAmelCase : Union[str, Any] = time.time()
_lowerCAmelCase : Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
_lowerCAmelCase : Any = time.time()
_lowerCAmelCase : Dict = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
_lowerCAmelCase : str = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''') | 340 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ ( unittest.TestCase ):
def _lowercase ( self: int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Dict = 1
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Union[str, Any] = (32, 32)
_lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
def _lowercase ( self: Any ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=__lowerCAmelCase ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
return model
@property
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
return model
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act="gelu" ,projection_dim=512 ,)
return CLIPTextModel(__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : List[str] = self.dummy_cond_unet_upscale
_lowerCamelCase : Dict = DDPMScheduler()
_lowerCamelCase : Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
_lowerCamelCase : List[Any] = self.dummy_vae
_lowerCamelCase : List[Any] = self.dummy_text_encoder
_lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCamelCase : Tuple = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
_lowerCamelCase : Dict = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase : Any = StableDiffusionUpscalePipeline(
unet=__lowerCAmelCase ,low_res_scheduler=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,max_noise_level=350 ,)
_lowerCamelCase : int = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : str = "A painting of a squirrel eating a burger"
_lowerCamelCase : Union[str, Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : Union[str, Any] = sd_pipe(
[prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,)
_lowerCamelCase : Any = output.images
_lowerCamelCase : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : str = sd_pipe(
[prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,return_dict=__lowerCAmelCase ,)[0]
_lowerCamelCase : str = image[0, -3:, -3:, -1]
_lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
_lowerCamelCase : Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCamelCase : Union[str, Any] = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase : Union[str, Any] = self.dummy_cond_unet_upscale
_lowerCamelCase : Any = DDPMScheduler()
_lowerCamelCase : List[str] = DDIMScheduler(prediction_type="v_prediction" )
_lowerCamelCase : List[Any] = self.dummy_vae
_lowerCamelCase : int = self.dummy_text_encoder
_lowerCamelCase : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCamelCase : List[Any] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
_lowerCamelCase : List[Any] = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCamelCase : str = StableDiffusionUpscalePipeline(
unet=__lowerCAmelCase ,low_res_scheduler=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,max_noise_level=350 ,)
_lowerCamelCase : List[str] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Tuple = "A painting of a squirrel eating a burger"
_lowerCamelCase : str = sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,)
_lowerCamelCase : Dict = output.images
assert image.shape[0] == 2
_lowerCamelCase : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
_lowerCamelCase : Any = sd_pipe(
[prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="np" ,)
_lowerCamelCase : List[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.dummy_cond_unet_upscale
_lowerCamelCase : Tuple = DDPMScheduler()
_lowerCamelCase : Optional[Any] = DDIMScheduler(prediction_type="v_prediction" )
_lowerCamelCase : int = self.dummy_vae
_lowerCamelCase : Any = self.dummy_text_encoder
_lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCamelCase : Union[str, Any] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
_lowerCamelCase : List[Any] = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCamelCase : Dict = unet.half()
_lowerCamelCase : Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCamelCase : Optional[Any] = StableDiffusionUpscalePipeline(
unet=__lowerCAmelCase ,low_res_scheduler=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,vae=__lowerCAmelCase ,text_encoder=__lowerCAmelCase ,tokenizer=__lowerCAmelCase ,max_noise_level=350 ,)
_lowerCamelCase : str = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "A painting of a squirrel eating a burger"
_lowerCamelCase : List[str] = torch.manual_seed(0 )
_lowerCamelCase : List[Any] = sd_pipe(
[prompt] ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=2 ,output_type="np" ,).images
_lowerCamelCase : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_lowerCamelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_lowerCamelCase : List[str] = "stabilityai/stable-diffusion-x4-upscaler"
_lowerCamelCase : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : Dict = "a cat sitting on a park bench"
_lowerCamelCase : Any = torch.manual_seed(0 )
_lowerCamelCase : Dict = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,output_type="np" ,)
_lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_lowerCamelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_lowerCamelCase : str = "stabilityai/stable-diffusion-x4-upscaler"
_lowerCamelCase : int = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCAmelCase ,torch_dtype=torch.float16 ,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing()
_lowerCamelCase : List[Any] = "a cat sitting on a park bench"
_lowerCamelCase : Any = torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,output_type="np" ,)
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowercase ( self: Any ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCamelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_lowerCamelCase : List[str] = "stabilityai/stable-diffusion-x4-upscaler"
_lowerCamelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCAmelCase ,torch_dtype=torch.float16 ,)
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCamelCase : List[str] = "a cat sitting on a park bench"
_lowerCamelCase : List[str] = torch.manual_seed(0 )
_lowerCamelCase : str = pipe(
prompt=__lowerCAmelCase ,image=__lowerCAmelCase ,generator=__lowerCAmelCase ,num_inference_steps=5 ,output_type="np" ,)
_lowerCamelCase : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
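# Minimal user-side sketch (not part of the test suite) of the memory-saving
# recipe the test above exercises: fp16 weights, attention slicing, and
# sequential CPU offload. The model id and image URL are copied from the tests;
# the function name and everything else are illustrative, not canonical usage.
def _low_memory_upscale_sketch():
    import torch
    from diffusers import StableDiffusionUpscalePipeline
    from diffusers.utils import load_image

    low_res_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/sd2-upscale/low_res_cat.png"
    )
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe.to("cuda")
    pipe.enable_attention_slicing(1)      # compute attention one slice at a time
    pipe.enable_sequential_cpu_offload()  # keep submodules on CPU until they run
    return pipe("a cat sitting on a park bench", image=low_res_image).images[0]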
| 364 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Optional[Any] = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 340 | 0 |
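# Toy, stdlib-only sketch of the _LazyModule indirection used above: importing
# the package stays cheap because a submodule is imported only when one of its
# attributes is first accessed. Caching and the error handling of the real
# implementation are omitted; all names here are illustrative.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule[attr]
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, attr)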
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class A_ ( unittest.TestCase ):
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_lowerCamelCase : str = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname ,__lowerCAmelCase )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,**__lowerCAmelCase: Union[str, Any] ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: Tuple ,**__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
image_inputs = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : Dict = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : List[str] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Optional[int] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
_lowerCamelCase : str = self.get_image_processor(do_normalize=__lowerCAmelCase ,padding_value=1.0 )
_lowerCamelCase : str = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=__lowerCAmelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Any = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Tuple = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : Dict = self.prepare_image_inputs()
_lowerCamelCase : Tuple = image_processor(__lowerCAmelCase ,return_tensors="np" )
_lowerCamelCase : Optional[Any] = processor(images=__lowerCAmelCase ,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Any = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : Tuple = "lower newer"
_lowerCamelCase : Any = processor(text=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer(__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Any = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
_lowerCamelCase : Tuple = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCAmelCase ):
processor()
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.get_image_processor()
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : Any = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Optional[Any] = processor.batch_decode(__lowerCAmelCase )
_lowerCamelCase : str = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Tuple = self.prepare_image_inputs()
_lowerCamelCase : str = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names ) | 365 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
class A_ ( _a ):
lowerCAmelCase__ = 'masked_bert'
def __init__( self: Union[str, Any] ,__lowerCAmelCase: Dict=30_522 ,__lowerCAmelCase: Optional[int]=768 ,__lowerCAmelCase: Dict=12 ,__lowerCAmelCase: List[Any]=12 ,__lowerCAmelCase: List[Any]=3_072 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Tuple=512 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Union[str, Any]=1e-12 ,__lowerCAmelCase: Union[str, Any]=0 ,__lowerCAmelCase: List[Any]="topK" ,__lowerCAmelCase: Optional[Any]="constant" ,__lowerCAmelCase: Optional[Any]=0.0 ,**__lowerCAmelCase: str ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : str = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : int = pruning_method
_lowerCamelCase : str = mask_init
_lowerCamelCase : List[Any] = mask_scale | 340 | 0 |
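# Miniature of the "topK" pruning method this config selects; the real logic
# lives in the movement-pruning research code, not in this file. Keep the k
# highest-magnitude importance scores per tensor and mask the rest.
def _topk_mask(scores, keep_ratio):
    # `scores` is any torch.Tensor of importance scores
    k = max(1, int(keep_ratio * scores.numel()))
    kth_largest = scores.abs().flatten().kthvalue(scores.numel() - k + 1).values
    return (scores.abs() >= kth_largest).to(scores.dtype)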
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class A_ ( _a ):
lowerCAmelCase__ = 'camembert'
def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Tuple = position_embedding_type
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : Dict = classifier_dropout
class A_ ( _a ):
@property
def _lowercase ( self: Any ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCamelCase : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 366 |
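# The `inputs` property above, restated in isolation: multiple-choice inputs
# carry an extra "choice" axis between batch and sequence; other tasks are 2-D.
# Stdlib-only sketch, runnable on its own.
from collections import OrderedDict as _OrderedDict


def _dynamic_axes(task):
    axis = (
        {0: "batch", 1: "choice", 2: "sequence"}
        if task == "multiple-choice"
        else {0: "batch", 1: "sequence"}
    )
    return _OrderedDict([("input_ids", axis), ("attention_mask", axis)])


assert _dynamic_axes("multiple-choice")["input_ids"][1] == "choice"
assert _dynamic_axes("default")["attention_mask"] == {0: "batch", 1: "sequence"}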
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCAmelCase : str = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    '''simple docstring'''
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    '''simple docstring'''
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class A_ :
lowerCAmelCase__ = None
lowerCAmelCase__ = ()
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_lowerCamelCase : List[str] = 2
_lowerCamelCase : str = inputs["input_ids"].shape[-1] // 2
_lowerCamelCase : Tuple = inputs["input_ids"][:max_batch_size, :sequence_length]
_lowerCamelCase : Any = jnp.ones_like(__lowerCAmelCase )
_lowerCamelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_lowerCamelCase : Optional[Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_lowerCamelCase : List[str] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = self._get_input_ids_and_config()
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Dict = max_length
_lowerCamelCase : Tuple = 0
for model_class in self.all_generative_model_classes:
_lowerCamelCase : str = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCamelCase : Any = getattr(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = pt_model_class(__lowerCAmelCase ).eval()
_lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__lowerCAmelCase ,flax_model.params )
_lowerCamelCase : int = flax_model.generate(__lowerCAmelCase ).sequences
_lowerCamelCase : Optional[int] = pt_model.generate(torch.tensor(__lowerCAmelCase ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_lowerCamelCase : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = self._get_input_ids_and_config()
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : int = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCamelCase : int = False
_lowerCamelCase : Optional[Any] = max_length
_lowerCamelCase : Dict = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
_lowerCamelCase : Dict = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Tuple = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = self._get_input_ids_and_config()
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = max_length
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[int] = 2
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : str = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = max_length
_lowerCamelCase : Optional[Any] = 0.8
_lowerCamelCase : Union[str, Any] = 10
_lowerCamelCase : List[str] = 0.3
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : str = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Optional[int] = model_class(__lowerCAmelCase )
_lowerCamelCase : Any = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : int = jit(model.generate )
_lowerCamelCase : Optional[int] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = self._get_input_ids_and_config()
_lowerCamelCase : List[str] = max_length
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Any = 8
_lowerCamelCase : Dict = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Any = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : Any = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
_lowerCamelCase : Dict = max_length
_lowerCamelCase : List[Any] = 2
_lowerCamelCase : Tuple = 1
_lowerCamelCase : List[str] = 8
_lowerCamelCase : List[Any] = 9
for model_class in self.all_generative_model_classes:
_lowerCamelCase : int = model_class(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Tuple = jit(model.generate )
_lowerCamelCase : Optional[Any] = jit_generate(__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : Tuple = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : Dict = False
_lowerCamelCase : Any = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Tuple = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : List[str] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : Optional[Any] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Any = jit(model.generate )
_lowerCamelCase : List[Any] = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCamelCase : List[str] = attention_mask.at[(0, 0)].set(0 )
_lowerCamelCase : int = 2
_lowerCamelCase : int = max_length
for model_class in self.all_generative_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,__lowerCAmelCase )
_lowerCamelCase : Dict = jit(model.generate )
_lowerCamelCase : Dict = jit_generate(__lowerCAmelCase ,attention_mask=__lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class A_ ( unittest.TestCase ):
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
_lowerCamelCase : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
_lowerCamelCase : Optional[Any] = "Hello world"
_lowerCamelCase : str = tokenizer(__lowerCAmelCase ,return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__lowerCAmelCase ,"do_samples" ):
model.generate(__lowerCAmelCase ,do_samples=__lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__lowerCAmelCase ,"foo" ):
_lowerCamelCase : List[str] = {"foo": "bar"}
model.generate(__lowerCAmelCase ,**__lowerCAmelCase ) | 340 | 0 |
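# The jit pattern the tests above repeat, distilled into plain usage: wrapping
# `model.generate` in `jax.jit` compiles the generation loop on the first call;
# later calls with same-shaped inputs reuse the compiled program. Model ids are
# the tiny ones the smoke test uses; the function name is illustrative.
def _jit_generate_sketch():
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
    model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
    input_ids = tokenizer("Hello world", return_tensors="np").input_ids
    jit_generate = jit(model.generate)  # traced and compiled once
    return jit_generate(input_ids).sequences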
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A_ :
def __init__( self: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: List[str]=13 ,__lowerCAmelCase: Union[str, Any]=30 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: str=3 ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: Dict=32 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: Union[str, Any]=4 ,__lowerCAmelCase: Union[str, Any]=37 ,__lowerCAmelCase: List[Any]="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Dict=0.1 ,__lowerCAmelCase: Tuple=10 ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: Any=3 ,__lowerCAmelCase: str=None ,__lowerCAmelCase: Tuple=2 ,):
'''simple docstring'''
_lowerCamelCase : Dict = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : str = num_channels
_lowerCamelCase : int = is_training
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = scope
_lowerCamelCase : Dict = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_lowerCamelCase : Tuple = (image_size // patch_size) ** 2
_lowerCamelCase : str = num_patches + 2
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: str ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = TFDeiTModel(config=__lowerCAmelCase )
_lowerCamelCase : Tuple = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = TFDeiTForMaskedImageModeling(config=__lowerCAmelCase )
_lowerCamelCase : str = model(__lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCamelCase : Any = 1
_lowerCamelCase : List[Any] = TFDeiTForMaskedImageModeling(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def _lowercase ( self: Dict ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.type_sequence_label_size
_lowerCamelCase : List[str] = TFDeiTForImageClassification(__lowerCAmelCase )
_lowerCamelCase : Tuple = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase : Any = 1
_lowerCamelCase : List[Any] = TFDeiTForImageClassification(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : int = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
_lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : str = TFDeiTModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
pass
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
_lowerCamelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,tf.keras.layers.Dense ) )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
_lowerCamelCase : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ,__lowerCAmelCase: str=False ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = super()._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = TFDeiTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : int = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
_lowerCamelCase : Any = self.default_image_processor
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : List[Any] = image_processor(images=__lowerCAmelCase ,return_tensors="tf" )
# forward pass
_lowerCamelCase : List[str] = model(**__lowerCAmelCase )
# verify the logits
_lowerCamelCase : Any = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Any = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) ) | 367 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : int = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class A_ ( _a ):
lowerCAmelCase__ = 'mobilenet_v1'
def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : List[Any] = depth_multiplier
_lowerCamelCase : Any = min_depth
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Dict = tf_padding
_lowerCamelCase : Union[str, Any] = classifier_dropout_prob
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : List[Any] = layer_norm_eps
class A_ ( _a ):
lowerCAmelCase__ = version.parse('1.11' )
@property
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def _lowercase ( self: Any ):
'''simple docstring'''
return 1e-4 | 340 | 0 |
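# How `depth_multiplier` and `min_depth` interact in MobileNetV1-style models:
# every layer's channel count is scaled, then clamped from below. The exact
# rounding rule is an assumption (TF-slim additionally rounds to a divisor);
# it is kept deliberately simple here.
def _scaled_channels(channels, depth_multiplier, min_depth=8):
    return max(min_depth, int(channels * depth_multiplier))


assert _scaled_channels(64, 0.75) == 48
assert _scaled_channels(8, 0.25) == 8  # clamped by min_depth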
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
    ) | 368 |
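# Sanity-check sketch for the function above: the QFT of the all-|0> register
# is an equal superposition, so 10_000 shots should split roughly uniformly
# over the 2**n basis states (about 1250 per outcome for n = 3). Tolerance is
# generous relative to shot noise.
def _qft_counts_roughly_uniform(number_of_qubits=3, shots=10_000):
    counts = quantum_fourier_transform(number_of_qubits)
    expected = shots / 2**number_of_qubits
    return all(abs(count - expected) / expected < 0.2 for count in counts.values())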
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device


def show_pil_image(img):
    '''simple docstring'''
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp | 340 | 0 |
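# Example wiring of the helpers above: pick a device, freeze a pretrained
# backbone, stamp the run. The torchvision model is purely illustrative and
# assumes torchvision >= 0.13 for the `weights=None` argument.
def _setup_sketch():
    import torchvision

    device = get_device()
    backbone = torchvision.models.resnet18(weights=None).to(device)
    freeze_params(backbone)  # the backbone now contributes no gradients
    print(f"[{get_timestamp()}] backbone frozen on {device}")
    return backbone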
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class A_ ( unittest.TestCase ):
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : Optional[Any] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCamelCase : List[Any] = dict(zip(__lowerCAmelCase ,range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCamelCase : Dict = {"unk_token": "<unk>"}
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_lowerCamelCase : str = os.path.join(self.tmpdirname ,__lowerCAmelCase )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,**__lowerCAmelCase: Any ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,pad_token="!" ,**__lowerCAmelCase )
def _lowercase ( self: Optional[int] ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,pad_token="!" ,**__lowerCAmelCase )
def _lowercase ( self: Any ,**__lowerCAmelCase: Dict ):
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
image_inputs = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : int = self.get_rust_tokenizer()
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : str = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname ,use_fast=__lowerCAmelCase )
_lowerCamelCase : str = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCamelCase : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,__lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer ,__lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,__lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : int = OwlViTProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
_lowerCamelCase : Tuple = self.get_image_processor(do_normalize=__lowerCAmelCase )
_lowerCamelCase : List[str] = OwlViTProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=__lowerCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,__lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.get_image_processor()
_lowerCamelCase : Any = self.get_tokenizer()
_lowerCamelCase : Any = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(__lowerCAmelCase ,return_tensors="np" )
_lowerCamelCase : List[Any] = processor(images=__lowerCAmelCase ,return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
_lowerCamelCase : List[str] = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : List[Any] = "lower newer"
_lowerCamelCase : Tuple = processor(text=__lowerCAmelCase ,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__lowerCAmelCase ,return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() ,encoded_processor[key][0].tolist() )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : List[Any] = "lower newer"
_lowerCamelCase : List[Any] = self.prepare_image_inputs()
_lowerCamelCase : str = processor(text=__lowerCAmelCase ,images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = "google/owlvit-base-patch32"
_lowerCamelCase : Dict = OwlViTProcessor.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = ["cat", "nasa badge"]
_lowerCamelCase : str = processor(text=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = 16
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape ,(2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def _lowercase ( self: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = "google/owlvit-base-patch32"
_lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = [["cat", "nasa badge"], ["person"]]
_lowerCamelCase : str = processor(text=__lowerCAmelCase )
_lowerCamelCase : List[str] = 16
_lowerCamelCase : List[Any] = len(input_texts )
_lowerCamelCase : Dict = max([len(texts ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape ,(batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = "google/owlvit-base-patch32"
_lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = ["cat", "nasa badge"]
_lowerCamelCase : Any = processor(text=__lowerCAmelCase )
_lowerCamelCase : List[str] = 16
_lowerCamelCase : List[Any] = inputs["input_ids"]
_lowerCamelCase : Dict = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape ,(2, seq_length) )
self.assertListEqual(list(input_ids[0] ) ,predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) ,predicted_ids[1] )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : str = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(images=__lowerCAmelCase ,query_images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
_lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__lowerCAmelCase ,image_processor=__lowerCAmelCase )
_lowerCamelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Dict = processor.batch_decode(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase ,__lowerCAmelCase ) | 369 |
"""simple docstring"""
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    '''simple docstring'''
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ") | 340 | 0 |
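# A few more worked cases for the interleaver above: characters alternate until
# the shorter string runs out, then the longer string's tail is appended.
assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
assert alternative_string_arrange("CD", "") == "CD"
assert alternative_string_arrange("", "Y") == "Y"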
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
class A_ :
"""simple docstring"""
def __init__( self: List[Any] ,__lowerCAmelCase: str = None ,__lowerCAmelCase: uuid.UUID = None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: Tuple=None ):
'''simple docstring'''
if not conversation_id:
_lowerCamelCase : List[str] = uuid.uuid4()
if past_user_inputs is None:
_lowerCamelCase : Dict = []
if generated_responses is None:
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : uuid.UUID = conversation_id
_lowerCamelCase : List[str] = past_user_inputs
_lowerCamelCase : List[str] = generated_responses
_lowerCamelCase : Optional[str] = text
def __eq__( self: Optional[int] ,__lowerCAmelCase: List[str] ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _lowercase ( self: Dict ,__lowerCAmelCase: str ,__lowerCAmelCase: bool = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
_lowerCamelCase : Tuple = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
_lowerCamelCase : Optional[int] = text
def _lowercase ( self: Tuple ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_lowerCamelCase : str = None
def _lowercase ( self: List[str] ,__lowerCAmelCase: str ):
'''simple docstring'''
self.generated_responses.append(__lowerCAmelCase )
def _lowercase ( self: Any ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Dict = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
_lowerCamelCase : Dict = "user" if is_user else "bot"
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
_a , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class A_ ( _a ):
"""simple docstring"""
def __init__( self: Dict ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: Any ):
'''simple docstring'''
super().__init__(*__lowerCAmelCase ,**__lowerCAmelCase )
if self.tokenizer.pad_token_id is None:
_lowerCamelCase : Tuple = self.tokenizer.eos_token
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: List[str]=None ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = {}
_lowerCamelCase : Any = {}
_lowerCamelCase : Tuple = {}
if min_length_for_response is not None:
_lowerCamelCase : Union[str, Any] = min_length_for_response
if minimum_tokens is not None:
_lowerCamelCase : Dict = minimum_tokens
if "max_length" in generate_kwargs:
_lowerCamelCase : Optional[Any] = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_lowerCamelCase : Union[str, Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__lowerCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self: Union[str, Any] ,__lowerCAmelCase: Union[Conversation, List[Conversation]] ,__lowerCAmelCase: Optional[int]=0 ,**__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : int = super().__call__(__lowerCAmelCase ,num_workers=__lowerCAmelCase ,**__lowerCAmelCase )
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) == 1:
return outputs[0]
return outputs
def _lowercase ( self: Any ,__lowerCAmelCase: Conversation ,__lowerCAmelCase: List[str]=32 ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer ,"_build_conversation_input_ids" ):
_lowerCamelCase : Tuple = self.tokenizer._build_conversation_input_ids(__lowerCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_lowerCamelCase : Optional[int] = self._legacy_parse_and_tokenize(__lowerCAmelCase )
if self.framework == "pt":
_lowerCamelCase : int = torch.LongTensor([input_ids] )
elif self.framework == "tf":
_lowerCamelCase : Tuple = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Tuple=10 ,**__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = generate_kwargs.get("max_length" ,self.model.config.max_length )
_lowerCamelCase : Any = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
_lowerCamelCase : Union[str, Any] = max_length - minimum_tokens
_lowerCamelCase : Optional[Any] = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
_lowerCamelCase : Dict = model_inputs["attention_mask"][:, -trim:]
_lowerCamelCase : str = model_inputs.pop("conversation" )
_lowerCamelCase : Tuple = max_length
_lowerCamelCase : Union[str, Any] = self.model.generate(**__lowerCAmelCase ,**__lowerCAmelCase )
if self.model.config.is_encoder_decoder:
_lowerCamelCase : Dict = 1
else:
_lowerCamelCase : Any = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: str=True ):
'''simple docstring'''
_lowerCamelCase : Tuple = model_outputs["output_ids"]
_lowerCamelCase : Optional[Any] = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=__lowerCAmelCase ,clean_up_tokenization_spaces=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(__lowerCAmelCase )
return conversation
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Conversation ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.tokenizer.eos_token_id
_lowerCamelCase : int = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__lowerCAmelCase ,add_special_tokens=__lowerCAmelCase ) )
if len(__lowerCAmelCase ) > self.tokenizer.model_max_length:
_lowerCamelCase : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids | 370 |
"""simple docstring"""
_lowerCAmelCase : Tuple = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
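    # Breadth-first search over the residual capacities: returns True when sink t is
    # reachable from source s. Each vertex discovered from u is queued, marked visited,
    # and is meant to record u as its parent so the augmenting path can be rebuilt.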
_lowerCamelCase : Any = [False] * len(_lowerCamelCase )
_lowerCamelCase : Union[str, Any] = [s]
_lowerCamelCase : str = True
while queue:
_lowerCamelCase : Optional[int] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_lowerCamelCase )
_lowerCamelCase : Any = True
_lowerCamelCase : Any = u
return visited[t]
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
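    # Edmonds-Karp style max flow: repeatedly push flow along BFS augmenting paths,
    # then report every edge with zero residual capacity but positive capacity in the
    # original copy; those saturated edges form the minimum cut.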
_lowerCamelCase : List[str] = [-1] * (len(_lowerCamelCase ))
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[str] = [i[:] for i in graph]  # Keep a copy of the original capacities so saturated (min-cut) edges can be identified later.
while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Any = float("Inf" )
_lowerCamelCase : Dict = sink
while s != source:
# Find the minimum value in select path
_lowerCamelCase : Union[str, Any] = min(_lowerCamelCase , graph[parent[s]][s] )
_lowerCamelCase : Union[str, Any] = parent[s]
max_flow += path_flow
_lowerCamelCase : Optional[Any] = sink
while v != source:
_lowerCamelCase : Union[str, Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_lowerCamelCase : List[str] = parent[v]
for i in range(len(_lowerCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5)) | 340 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1024 , _lowerCamelCase=1024 , _lowerCamelCase=False , **_lowerCamelCase ) -> Tuple:
'''simple docstring'''
_lowerCamelCase : str = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCamelCase : Optional[int] = SeqaSeqDataset(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , type_path="train" , **_lowerCamelCase )
_lowerCamelCase : Tuple = tok.pad_token_id
def get_lens(_lowerCamelCase ):
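        # Single pass over the dataset: for each example, count the non-pad tokens in
        # the inputs (and in the labels when consider_target is set, keeping the max).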
_lowerCamelCase : Tuple = tqdm(
DataLoader(_lowerCamelCase , batch_size=512 , num_workers=8 , shuffle=_lowerCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_lowerCamelCase : int = []
for batch in dl:
_lowerCamelCase : List[str] = batch["input_ids"].ne(_lowerCamelCase ).sum(1 ).tolist()
_lowerCamelCase : Tuple = batch["labels"].ne(_lowerCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(_lowerCamelCase , _lowerCamelCase ):
max_lens.append(max(_lowerCamelCase , _lowerCamelCase ) )
else:
max_lens.extend(_lowerCamelCase )
return max_lens
_lowerCamelCase : Optional[Any] = get_lens(_lowerCamelCase )
_lowerCamelCase : Tuple = SeqaSeqDataset(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , type_path="val" , **_lowerCamelCase )
_lowerCamelCase : Optional[Any] = get_lens(_lowerCamelCase )
pickle_save(_lowerCamelCase , train_ds.len_file )
pickle_save(_lowerCamelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file) | 371 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class A_ ( _a ):
lowerCAmelCase__ = 'camembert'
def __init__( self: Tuple ,__lowerCAmelCase: Union[str, Any]=30_522 ,__lowerCAmelCase: Optional[Any]=768 ,__lowerCAmelCase: Union[str, Any]=12 ,__lowerCAmelCase: int=12 ,__lowerCAmelCase: Optional[int]=3_072 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: Union[str, Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: Tuple=0.02 ,__lowerCAmelCase: Dict=1e-12 ,__lowerCAmelCase: Any=1 ,__lowerCAmelCase: Any=0 ,__lowerCAmelCase: Optional[int]=2 ,__lowerCAmelCase: Any="absolute" ,__lowerCAmelCase: Dict=True ,__lowerCAmelCase: Tuple=None ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,bos_token_id=__lowerCAmelCase ,eos_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : List[str] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : str = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : Tuple = position_embedding_type
_lowerCamelCase : List[Any] = use_cache
_lowerCamelCase : Dict = classifier_dropout
class A_ ( _a ):
@property
def _lowercase ( self: Any ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCamelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCamelCase : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 340 | 0 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase = 10**9 ) -> int:
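    # Sums the perimeters of "almost equilateral" Heronian triangles (sides a, a, a +/- 1
    # with integral sides and area) up to max_perimeter; the prev_value/value recurrence
    # walks the solutions of the underlying Pell equation (perimeters 16, 50, 196, ...),
    # matching Project Euler problem 94.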
_lowerCAmelCase =1
_lowerCAmelCase =2
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
_lowerCAmelCase =2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
| 341 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# assemble the upscale pipeline from the dummy components
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# assemble the upscale pipeline from the dummy components
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =text_encoder.half()
# assemble the upscale pipeline from the dummy components
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
_lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
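# Digit-cancelling ("curious") fractions in the spirit of Project Euler problem 33:
# two-digit fractions such as 49/98 that keep their value when the digit shared by
# numerator and denominator is naively cancelled (49/98 -> 4/8).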
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _lowerCamelCase(__UpperCamelCase ) -> list[str]:
_lowerCAmelCase =[]
_lowerCAmelCase =11
_lowerCAmelCase =int("""1""" + """0""" * digit_len )
for num in range(__UpperCamelCase , __UpperCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__UpperCamelCase , __UpperCamelCase ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
_lowerCAmelCase =10
return solutions
def _lowerCamelCase(__UpperCamelCase = 2 ) -> int:
_lowerCAmelCase =1.0
for fraction in fraction_list(__UpperCamelCase ):
_lowerCAmelCase =Fraction(__UpperCamelCase )
result *= frac.denominator / frac.numerator
return int(__UpperCamelCase )
if __name__ == "__main__":
print(solution())
| 341 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''cvt'''
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =num_channels
_lowerCAmelCase =patch_sizes
_lowerCAmelCase =patch_stride
_lowerCAmelCase =patch_padding
_lowerCAmelCase =embed_dim
_lowerCAmelCase =num_heads
_lowerCAmelCase =depth
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =attention_drop_rate
_lowerCAmelCase =drop_rate
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =qkv_bias
_lowerCAmelCase =cls_token
_lowerCAmelCase =qkv_projection_method
_lowerCAmelCase =kernel_qkv
_lowerCAmelCase =padding_kv
_lowerCAmelCase =stride_kv
_lowerCAmelCase =padding_q
_lowerCAmelCase =stride_q
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
| 341 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# assemble the upscale pipeline from the dummy components
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# assemble the upscale pipeline from the dummy components
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =text_encoder.half()
# assemble the upscale pipeline from the dummy components
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
__UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
_lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 341 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''image_processor''', '''tokenizer''']
lowerCamelCase = '''CLIPImageProcessor'''
lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
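    # Bundles a CLIP image processor with an XLM-RoBERTa tokenizer into a single
    # processor, the pairing used by multilingual CLIP-style checkpoints (e.g. AltCLIP).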
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
_lowerCAmelCase =kwargs.pop("""feature_extractor""" )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 341 | 1 |
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class lowerCamelCase__ :
'''simple docstring'''
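    # Wraps a torch LR scheduler so it only advances when the wrapped optimizer(s)
    # actually stepped (gradients synced and no step skipped by the grad scaler),
    # keeping the schedule aligned under gradient accumulation and multi-process runs.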
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , __UpperCAmelCase = False ) -> int:
_lowerCAmelCase =scheduler
_lowerCAmelCase =optimizers if isinstance(__UpperCAmelCase , (list, tuple) ) else [optimizers]
_lowerCAmelCase =split_batches
_lowerCAmelCase =step_with_optimizer
_lowerCAmelCase =GradientState()
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
_lowerCAmelCase =AcceleratorState().num_processes
for _ in range(__UpperCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , """total_steps""" ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
else:
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
return self.scheduler.get_last_lr()
def _lowerCAmelCase ( self ) -> Any:
return self.scheduler.state_dict()
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict:
self.scheduler.load_state_dict(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return self.scheduler.get_lr()
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.scheduler.print_lr(*__UpperCAmelCase , **__UpperCAmelCase )
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['PerceiverFeatureExtractor']
__A = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
__A = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =graph
# mapping node to its parent in resulting breadth first tree
_lowerCAmelCase ={}
_lowerCAmelCase =source_vertex
def _lowerCAmelCase ( self ) -> None:
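        # Standard breadth-first search from source_vertex, recording each vertex's
        # parent so that shortest paths can be reconstructed afterwards.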
_lowerCAmelCase ={self.source_vertex}
_lowerCAmelCase =None
_lowerCAmelCase =[self.source_vertex] # first in first out queue
while queue:
_lowerCAmelCase =queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(__UpperCAmelCase )
_lowerCAmelCase =vertex
queue.append(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
_lowerCAmelCase =self.parent.get(__UpperCAmelCase )
if target_vertex_parent is None:
_lowerCAmelCase =(
f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(__UpperCAmelCase )
return self.shortest_path(__UpperCAmelCase ) + f'''->{target_vertex}'''
if __name__ == "__main__":
__A = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
__A = input('Enter image url: ').strip()
print(F"""Downloading image from {url} ...""")
__A = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
__A = soup.find('meta', {'property': 'og:image'})['content']
__A = requests.get(image_url).content
__A = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 341 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple:
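    # Shaves dot-separated segments off a parameter path: the first n segments when
    # n >= 0, otherwise the trailing |n| segments.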
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]:
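    # Renames LDM ResNet parameter paths (in_layers / out_layers / emb_layers /
    # skip_connection) to their diffusers equivalents (norm1/conv1, norm2/conv2,
    # time_emb_proj, conv_shortcut).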
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" )
_lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" )
_lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" )
_lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" )
_lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple:
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item
_lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" )
_lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" )
_lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_lowerCAmelCase =old_checkpoint[path]
_lowerCAmelCase =old_tensor.shape[0] // 3
_lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3
_lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 )
_lowerCAmelCase =query.reshape(__UpperCamelCase )
_lowerCAmelCase =key.reshape(__UpperCamelCase )
_lowerCAmelCase =value.reshape(__UpperCamelCase )
for path in paths:
_lowerCAmelCase =path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0]
else:
_lowerCAmelCase =old_checkpoint[path["""old"""]]
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
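    # Re-keys an LDM UNet state dict onto the diffusers UNet2DModel layout: the time
    # embedding, conv_in/conv_out, then the down, middle and up blocks, splitting each
    # fused qkv attention weight into separate query/key/value tensors along the way.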
_lowerCAmelCase ={}
_lowerCAmelCase =checkpoint["""time_embed.0.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.0.bias"""]
_lowerCAmelCase =checkpoint["""time_embed.2.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.2.bias"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""]
_lowerCAmelCase =checkpoint["""out.0.weight"""]
_lowerCAmelCase =checkpoint["""out.0.bias"""]
_lowerCAmelCase =checkpoint["""out.2.weight"""]
_lowerCAmelCase =checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 , __UpperCamelCase ):
_lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
_lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''input_blocks.{i}.1''',
"""new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''input_blocks.{i}.1.qkv.bias''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
_lowerCAmelCase =middle_blocks[0]
_lowerCAmelCase =middle_blocks[1]
_lowerCAmelCase =middle_blocks[2]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
_lowerCAmelCase =i // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =i % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
_lowerCAmelCase ={}
for layer in output_block_layers:
_lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
_lowerCAmelCase =[layer_name]
if len(__UpperCamelCase ) > 1:
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
_lowerCAmelCase =[]
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''output_blocks.{i}.1''',
"""new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''output_blocks.{i}.1.qkv.bias''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
_lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
_lowerCAmelCase =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__A = parser.parse_args()
__A = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__A = json.loads(f.read())
__A = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__A = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 341 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__A = 5_0003
__A = 5_0002
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = PLBartTokenizer
lowerCamelCase = None
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase =PLBartTokenizer(__UpperCAmelCase , language_codes="""base""" , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =PLBartTokenizer(__UpperCAmelCase , language_codes="""base""" , keep_accents=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_lowerCAmelCase =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
_lowerCAmelCase =tokenizer.vocab_size
_lowerCAmelCase =[tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) for x in range(end - 4 , __UpperCAmelCase )]
self.assertListEqual(__UpperCAmelCase , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
_lowerCAmelCase ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
_lowerCAmelCase =tokenizer(__UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) , __UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =PLBartTokenizer(__UpperCAmelCase , language_codes="""multi""" , keep_accents=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_lowerCAmelCase =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
_lowerCAmelCase =tokenizer.vocab_size
_lowerCAmelCase =[tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) for x in range(end - 7 , __UpperCAmelCase )]
self.assertListEqual(
__UpperCAmelCase , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
_lowerCAmelCase ="""java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
_lowerCAmelCase =tokenizer(__UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) , __UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = '''uclanlp/plbart-python-en_XX'''
lowerCamelCase = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
lowerCamelCase = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
lowerCamelCase = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def _lowerCAmelCase ( cls ) -> Union[str, Any]:
_lowerCAmelCase =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
_lowerCAmelCase =1
return cls
def _lowerCAmelCase ( self ) -> Tuple:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Any:
self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids )
_lowerCAmelCase =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
_lowerCAmelCase =self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , __UpperCAmelCase )
_lowerCAmelCase =10
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =tempfile.mkdtemp()
_lowerCAmelCase =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCAmelCase )
_lowerCAmelCase =PLBartTokenizer.from_pretrained(__UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase )
@require_torch
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors="""pt""" )
_lowerCAmelCase =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
_lowerCAmelCase =shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
_lowerCAmelCase =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors="""pt""" )
_lowerCAmelCase =self.tokenizer(
text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=10 , return_tensors="""pt""" )
_lowerCAmelCase =targets["""input_ids"""]
_lowerCAmelCase =shift_tokens_right(__UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[1_50, 2_42, 2, 5_00_03]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_00_01,
} , )
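# Aside: a hedged sketch of the mBART-style `shift_tokens_right` the assertions
# above rely on. For PLBart-like models the target language code sits at the end
# of the labels, and the decoder input is the labels rotated right so that code
# becomes the first decoder token. The canonical version lives in
# transformers.models.plbart.modeling_plbart; this sketch only mirrors the
# behavior the tests check.
def _demo_shift_tokens_right(input_ids, pad_token_id):
    shifted = input_ids.clone()
    # index of the last non-pad token in each row (the language code)
    last_token = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    start_tokens = shifted.gather(1, last_token).squeeze(-1)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = start_tokens
    return shifted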
| 341 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _lowerCamelCase(__UpperCamelCase ) -> List[Any]:
if len(__UpperCamelCase ) <= 1:
return arr, 0
_lowerCAmelCase =len(__UpperCamelCase ) // 2
_lowerCAmelCase =arr[0:mid]
_lowerCAmelCase =arr[mid:]
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any:
_lowerCAmelCase =[]
_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for every i <= k < len(p):
            # all of those pairs are inversions, because p is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
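# Aside: a tiny self-contained check of the cross-inversion idea used above,
# written against plain lists rather than the helpers in this file. Every pair
# (p[i], q[j]) with p[i] > q[j] is one cross inversion; because p is sorted,
# finding one such p[i] means every later element of p inverts with q[j] too,
# which is exactly what the `len(p) - i` increment exploits.
def _demo_cross_inversions(p, q):
    # brute-force count of pairs (x, y), x from p and y from q, with x > y
    return sum(1 for x in p for y in q if x > y)


# e.g. _demo_cross_inversions([2, 5, 10], [1, 3]) == 5:
# (2,1), (5,1), (5,3), (10,1), (10,3)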
def _lowerCamelCase() -> str:
_lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
# an empty list should also have zero inversions
_lowerCAmelCase =[]
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
if __name__ == "__main__":
main()
| 341 | 1 |
"""simple docstring"""
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Union[str, Any]:
_lowerCAmelCase =data
_lowerCAmelCase =previous
_lowerCAmelCase =next_node
def __str__( self ) -> str:
return f'''{self.data}'''
def _lowerCAmelCase ( self ) -> int:
return self.data
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return self.next
def _lowerCAmelCase ( self ) -> Dict:
return self.previous
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =head
def __iter__( self ) -> Union[str, Any]:
return self
def _lowerCAmelCase ( self ) -> List[Any]:
if not self.current:
raise StopIteration
else:
_lowerCAmelCase =self.current.get_data()
_lowerCAmelCase =self.current.get_next()
return value
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> Tuple:
_lowerCAmelCase =None # First node in list
_lowerCAmelCase =None # Last node in list
def __str__( self ) -> Union[str, Any]:
_lowerCAmelCase =self.head
_lowerCAmelCase =[]
while current is not None:
nodes.append(current.get_data() )
_lowerCAmelCase =current.get_next()
return " ".join(str(__UpperCAmelCase ) for node in nodes )
def __contains__( self , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =self.head
while current:
if current.get_data() == value:
return True
_lowerCAmelCase =current.get_next()
return False
def __iter__( self ) -> int:
return LinkedListIterator(self.head )
def _lowerCAmelCase ( self ) -> Optional[int]:
if self.head:
return self.head.get_data()
return None
def _lowerCAmelCase ( self ) -> int:
if self.tail:
return self.tail.get_data()
return None
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None:
if self.head is None:
_lowerCAmelCase =node
_lowerCAmelCase =node
else:
self.insert_before_node(self.head , __UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None:
if self.head is None:
self.set_head(__UpperCAmelCase )
else:
self.insert_after_node(self.tail , __UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None:
_lowerCAmelCase =Node(__UpperCAmelCase )
if self.head is None:
self.set_head(__UpperCAmelCase )
else:
self.set_tail(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =node
_lowerCAmelCase =node.previous
if node.get_previous() is None:
_lowerCAmelCase =node_to_insert
else:
_lowerCAmelCase =node_to_insert
_lowerCAmelCase =node_to_insert
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =node
_lowerCAmelCase =node.next
if node.get_next() is None:
_lowerCAmelCase =node_to_insert
else:
_lowerCAmelCase =node_to_insert
_lowerCAmelCase =node_to_insert
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None:
_lowerCAmelCase =1
_lowerCAmelCase =Node(__UpperCAmelCase )
_lowerCAmelCase =self.head
while node:
if current_position == position:
self.insert_before_node(__UpperCAmelCase , __UpperCAmelCase )
return
current_position += 1
_lowerCAmelCase =node.next
self.insert_after_node(self.tail , __UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Node:
_lowerCAmelCase =self.head
while node:
if node.get_data() == item:
return node
_lowerCAmelCase =node.get_next()
raise Exception("""Node not found""" )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict:
if (node := self.get_node(__UpperCAmelCase )) is not None:
if node == self.head:
_lowerCAmelCase =self.head.get_next()
if node == self.tail:
_lowerCAmelCase =self.tail.get_previous()
self.remove_node_pointers(__UpperCAmelCase )
@staticmethod
def _lowerCAmelCase ( __UpperCAmelCase ) -> None:
if node.get_next():
_lowerCAmelCase =node.previous
if node.get_previous():
_lowerCAmelCase =node.next
_lowerCAmelCase =None
_lowerCAmelCase =None
def _lowerCAmelCase ( self ) -> Optional[Any]:
return self.head is None
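# Aside: a minimal, self-contained sketch of the pointer bookkeeping the insert
# helpers above maintain. The class names in this file are mangled, so the demo
# builds its own nodes; the invariant is the same: `previous` and `next_node`
# must be rewired in pairs, or forward and backward traversal will disagree.
def _demo_manual_links():
    class _DemoNode:  # stand-in with the same three fields as Node above
        def __init__(self, data):
            self.data, self.previous, self.next_node = data, None, None

    first, second = _DemoNode(1), _DemoNode(2)
    first.next_node, second.previous = second, first  # wire both directions
    assert first.next_node.data == 2 and second.previous.data == 1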
def _lowerCamelCase() -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
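# Aside: the copy() above deep-copies every attribute, so mutable fields are not
# shared between the original config and its clone. A minimal illustration; the
# `_Cfg` dataclass is hypothetical and not part of this module's API.
def _demo_deepcopy_clone():
    @dataclass
    class _Cfg:
        opts: dict = None

        def clone(self):
            return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

    original = _Cfg(opts={"retries": 1})
    clone = original.clone()
    clone.opts["retries"] = 5
    assert original.opts["retries"] == 1  # the original dict is untouched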
| 341 | 1 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
__A = logging.getLogger(__name__)
__A = 50 # max width of layer names
__A = 70 # max width of quantizer names
def _lowerCamelCase(__UpperCamelCase ) -> Dict:
_lowerCAmelCase =parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=__UpperCamelCase , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=__UpperCamelCase , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=__UpperCamelCase , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=__UpperCamelCase , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=__UpperCamelCase , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=__UpperCamelCase , type=__UpperCamelCase , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=__UpperCamelCase , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def _lowerCamelCase(__UpperCamelCase ) -> str:
if args.calibrator == "max":
_lowerCAmelCase ="""max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
_lowerCAmelCase ="""histogram"""
elif args.calibrator == "mse":
_lowerCAmelCase ="""histogram"""
else:
raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
_lowerCAmelCase =QuantDescriptor(num_bits=args.aprec , calib_method=__UpperCamelCase )
_lowerCAmelCase =QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__UpperCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(__UpperCamelCase )
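# Aside: what the calibrated "amax" buys you downstream. The calibrators selected
# above estimate a per-tensor (or per-channel, axis=(0,)) absolute maximum, and
# fake quantization then maps [-amax, amax] onto the signed int8 grid. A minimal
# sketch of that mapping with plain torch, assuming a scalar amax; the library's
# own kernels handle the exact rounding and clipping details.
def _demo_fake_quant_int8(x, amax):
    scale = amax / 127.0
    return (x / scale).round().clamp(-127, 127) * scale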
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ) -> Dict:
logger.info("""Configuring Model for Quantization""" )
logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__UpperCamelCase , ["""embeddings"""] , which="""weight""" , _disabled=__UpperCamelCase )
if args.quant_disable:
set_quantizer_by_name(__UpperCamelCase , [""""""] , _disabled=__UpperCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(__UpperCamelCase , args.quant_disable_keyword , _disabled=__UpperCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(__UpperCamelCase , [R"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=__UpperCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(__UpperCamelCase , [R"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=__UpperCamelCase )
if args.recalibrate_weights:
recalibrate_weights(__UpperCamelCase )
if args.fuse_qkv:
fuse_qkv(__UpperCamelCase , __UpperCamelCase )
if args.clip_gelu:
clip_gelu(__UpperCamelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Dict:
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Tuple:
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
def fusea(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
for mod in [qq, qk, qv]:
if not hasattr(__UpperCamelCase , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
_lowerCAmelCase =qq._amax.detach().item()
_lowerCAmelCase =qk._amax.detach().item()
_lowerCAmelCase =qv._amax.detach().item()
_lowerCAmelCase =max(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
qq._amax.fill_(__UpperCamelCase )
qk._amax.fill_(__UpperCamelCase )
qv._amax.fill_(__UpperCamelCase )
logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict:
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
_lowerCAmelCase =mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__UpperCamelCase )
_lowerCAmelCase =mod._input_quantizer._amax.data.detach().item()
logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def _lowerCamelCase(__UpperCamelCase ) -> Dict:
for name, mod in model.named_modules():
if hasattr(__UpperCamelCase , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
_lowerCAmelCase =mod.weight.shape[0]
_lowerCAmelCase =mod._weight_quantizer._amax.detach()
_lowerCAmelCase =torch.ones(__UpperCamelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]:
for name, mod in model.named_modules():
if hasattr(__UpperCamelCase , """_weight_quantizer""" ):
            if not hasattr(mod._weight_quantizer , """_amax""" ):
                print(F'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_lowerCAmelCase =set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_lowerCAmelCase =set(range(len(mod.weight.size() ) ) ) - axis_set
_lowerCAmelCase =pytorch_quantization.utils.reduce_amax(mod.weight , axis=__UpperCamelCase , keepdims=__UpperCamelCase ).detach()
logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
_lowerCAmelCase =amax
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=25 , __UpperCamelCase=180 , __UpperCamelCase=None ) -> List[str]:
if ignore is None:
_lowerCAmelCase =[]
elif not isinstance(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =[ignore]
_lowerCAmelCase =0
for name, mod in model.named_modules():
if not hasattr(__UpperCamelCase , """weight""" ):
continue
_lowerCAmelCase =max(__UpperCamelCase , len(__UpperCamelCase ) )
for name, mod in model.named_modules():
_lowerCAmelCase =getattr(__UpperCamelCase , """_input_quantizer""" , __UpperCamelCase )
_lowerCAmelCase =getattr(__UpperCamelCase , """_weight_quantizer""" , __UpperCamelCase )
if not hasattr(__UpperCamelCase , """weight""" ):
continue
if type(__UpperCamelCase ) in ignore:
continue
        if [True for s in ignore if type(s ) is str and s in name]:
continue
_lowerCAmelCase =F'''Act:{input_q.extra_repr()}'''
_lowerCAmelCase =F'''Wgt:{weight_q.extra_repr()}'''
_lowerCAmelCase =F'''{name:{name_width}} {act_str} {wgt_str}'''
if len(__UpperCamelCase ) <= line_width:
logger.info(__UpperCamelCase )
else:
logger.info(F'''{name:{name_width}} {act_str}''' )
logger.info(F'''{' ':{name_width}} {wgt_str}''' )
def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]:
_lowerCAmelCase =0
for name, mod in model.named_modules():
if isinstance(__UpperCamelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F'''{name:80} {mod}''' )
count += 1
print(F'''{count} TensorQuantizers found in model''' )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
_lowerCAmelCase =getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if quantizer_mod is not None:
assert hasattr(__UpperCamelCase , __UpperCamelCase )
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
logger.warning(F'''{name} has no {quantizer}''' )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase="both" , **__UpperCamelCase ) -> int:
_lowerCAmelCase =F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(__UpperCamelCase , __UpperCamelCase , """_input_quantizer""" , __UpperCamelCase , __UpperCamelCase )
if which in ["weight", "both"]:
set_quantizer(__UpperCamelCase , __UpperCamelCase , """_weight_quantizer""" , __UpperCamelCase , __UpperCamelCase )
logger.info(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) -> str:
for name, mod in model.named_modules():
if hasattr(__UpperCamelCase , """_input_quantizer""" ) or hasattr(__UpperCamelCase , """_weight_quantizer""" ):
for n in names:
if re.search(__UpperCamelCase , __UpperCamelCase ):
set_quantizers(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =F'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
logger.info(__UpperCamelCase )
| 341 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
    return int((input_a, input_b).count(1 ) != 0 )
def _lowerCamelCase() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
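# Aside: the same gate checked over its full truth table, generated with
# itertools rather than enumerated by hand.
def print_truth_table() -> None:
    from itertools import product

    for a, b in product((0, 1), repeat=2):
        print(f"{a} OR {b} = {or_gate(a, b)}")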
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 341 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=2_24 , __UpperCAmelCase=10_00 , __UpperCAmelCase=[3, 3, 6, 4] , __UpperCAmelCase=[48, 56, 1_12, 2_20] , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =num_labels
_lowerCAmelCase =image_size
_lowerCAmelCase =layer_depths
_lowerCAmelCase =embed_dims
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ) -> Any:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__UpperCAmelCase , layer_scale_init_value=1e-5 , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_lowerCAmelCase =SwiftFormerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =SwiftFormerForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_lowerCAmelCase =SwiftFormerForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self ) -> List[str]:
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =self.prepare_config_and_inputs()
_lowerCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =SwiftFormerModelTester(self )
_lowerCAmelCase =ConfigTester(
self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _lowerCAmelCase ( self ) -> Optional[Any]:
pass
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =SwiftFormerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _lowerCAmelCase ( self ) -> Tuple:
pass
def _lowerCAmelCase ( self ) -> str:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowerCAmelCase =model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_lowerCAmelCase =outputs.hidden_states
_lowerCAmelCase =8
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__UpperCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Dict:
def _config_zero_init(__UpperCAmelCase ):
_lowerCAmelCase =copy.deepcopy(__UpperCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__UpperCAmelCase , __UpperCAmelCase , 1e-10 )
if isinstance(getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ):
_lowerCAmelCase =_config_zero_init(getattr(__UpperCAmelCase , __UpperCAmelCase ) )
setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return configs_no_init
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =_config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCamelCase() -> List[str]:
_lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCAmelCase ( self ) -> str:
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(__UpperCAmelCase )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**__UpperCAmelCase )
# verify the logits
_lowerCAmelCase =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_lowerCAmelCase =torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 341 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple:
_lowerCAmelCase =compute_bleu(
reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
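# Aside: the headline score returned above follows the standard corpus-BLEU
# formula, BLEU = BP * exp(mean_n log p_n), where the n-gram precisions p_n are
# tallied by nmt_bleu's compute_bleu and BP is the brevity penalty: 1 when the
# candidate is longer than the effective reference, exp(1 - r / c) otherwise.
# A minimal sketch of BP alone (assumes a non-empty candidate):
def _demo_brevity_penalty(translation_length, reference_length):
    import math

    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / translation_length)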
| 341 | 1 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__A = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__A = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__A = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =spearmanr(__UpperCAmelCase , __UpperCAmelCase )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 341 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
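    # Why a custom parser: argparse's `type=bool` would call bool("False"), which
    # is truthy for any non-empty string, so `--use_linear_projection False` would
    # silently enable the option. A quick sanity check of the helper:
    assert parse_bool('True') is True and parse_bool('False') is False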
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__A = parser.parse_args()
__A = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 341 | 1 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _lowerCamelCase() -> None:
print("""Making key files...""" )
make_key_files("""rsa""" , 1024 )
print("""Key files generation successful.""" )
def _lowerCamelCase(__UpperCamelCase ) -> tuple[tuple[int, int], tuple[int, int]]:
print("""Generating prime p...""" )
_lowerCAmelCase =rabinMiller.generate_large_prime(__UpperCamelCase )
print("""Generating prime q...""" )
_lowerCAmelCase =rabinMiller.generate_large_prime(__UpperCamelCase )
_lowerCAmelCase =p * q
print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
while True:
_lowerCAmelCase =random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(__UpperCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("""Calculating d that is mod inverse of e...""" )
_lowerCAmelCase =cryptoMath.find_mod_inverse(__UpperCamelCase , (p - 1) * (q - 1) )
_lowerCAmelCase =(n, e)
_lowerCAmelCase =(n, d)
return (public_key, private_key)
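# Aside: a tiny, insecure sanity check of the math above. With n = p * q and
# d = e^(-1) mod (p - 1)(q - 1), encrypting with pow(m, e, n) and decrypting with
# pow(c, d, n) recovers m. Toy textbook primes only; never use key sizes like this.
def _demo_rsa_round_trip() -> None:
    p, q, e = 61, 53, 17
    n, phi = p * q, (p - 1) * (q - 1)
    d = pow(e, -1, phi)  # modular inverse, Python >= 3.8
    message = 42
    cipher = pow(message, e, n)
    assert pow(cipher, d, n) == message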
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> None:
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print("""\nWARNING:""" )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
_lowerCAmelCase , _lowerCAmelCase =generate_key(__UpperCamelCase )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' , """w""" ) as out_file:
out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' , """w""" ) as out_file:
out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
main()
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
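# Aside: the lazy pattern above defers heavy imports until an attribute is first
# touched. The same idea in miniature uses PEP 562's module-level __getattr__,
# sketched here under a different name so it stays inert in this file (rename it
# to __getattr__ inside your own module to activate it; the mapping is
# illustrative, not transformers' actual _LazyModule machinery):
def _demo_lazy_getattr(name):
    lazy_map = {"ASTConfig": ".configuration_audio_spectrogram_transformer"}
    if name in lazy_map:
        import importlib

        module = importlib.import_module(lazy_map[name], __package__)
        return getattr(module, name)
    raise AttributeError(name)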
| 341 | 1 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Tuple:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCAmelCase =flax_key_tuple[:-1] + ("""weight""",)
_lowerCAmelCase =torch.permute(__UpperCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCamelCase ):
# linear layer
_lowerCAmelCase =flax_key_tuple[:-1] + ("""weight""",)
_lowerCAmelCase =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCAmelCase =flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
if "metadata" in layer:
_lowerCAmelCase =layer.split("""metadata""" )
_lowerCAmelCase ="""""".join(split_layer[0] )[:-1]
_lowerCAmelCase =[tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
_lowerCAmelCase =layer.split("""kvstore""" )
_lowerCAmelCase ="""""".join(split_layer[0] )[:-1]
_lowerCAmelCase =[tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
_lowerCAmelCase =layer.split("""/""" )
_lowerCAmelCase ="""/""".join(split_layer[:-1] )
_lowerCAmelCase =(split_layer[-1],)
if "kvstore/path" in layer:
_lowerCAmelCase =F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
_lowerCAmelCase ="""file"""
else:
_lowerCAmelCase =checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict:
_lowerCAmelCase =rename_keys(__UpperCamelCase )
_lowerCAmelCase ={}
for k, v in current_block.items():
_lowerCAmelCase =v
_lowerCAmelCase =new_current_block
torch.save(__UpperCamelCase , __UpperCamelCase )
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
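# --- Added sketch (illustration only) of the size bookkeeping above. The real helpers
# `convert_file_size_to_int` and `dtype_byte_size` come from transformers; these
# simplified stand-ins cover just the cases this script exercises (e.g. "10GB").
import re


def _file_size_to_int_sketch(size):
    units = {"KB": 10**3, "MB": 10**6, "GB": 10**9}
    for suffix, factor in units.items():
        if size.upper().endswith(suffix):
            return int(size[: -len(suffix)]) * factor
    return int(size)


def _dtype_byte_size_sketch(dtype):
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))  # e.g. torch.bfloat16 -> 16 bits
    return int(bit_search.groups()[0]) / 8


# a (1000, 1000) bfloat16 tensor contributes 2 MB toward the current shard
assert _dtype_byte_size_sketch(torch.bfloat16) * 1000 * 1000 == 2_000_000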
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_m2m_100'] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    '''simple docstring'''

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=4 , ) -> List[Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_lowerCAmelCase =self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_lowerCAmelCase =(
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
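# --- Added sketch (illustration only): LED additionally takes a `global_attention_mask`,
# which the helper above does not build. This mirrors the recipe used in the test class
# below: mark the first `num_global` positions as global (1) and leave the rest local (0).
def _global_attention_mask_sketch(batch_size, seq_length, num_global):
    positions = tf.range(seq_length)[None, :]        # shape (1, seq_length)
    mask = tf.where(positions < num_global, 1, 0)    # 1 = global attention
    return tf.tile(mask, [batch_size, 1])            # shape (batch_size, seq_length)


# _global_attention_mask_sketch(2, 5, 2) -> [[1, 1, 0, 0, 0], [1, 1, 0, 0, 0]]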
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =tf.zeros_like(inputs_dict["""attention_mask"""] )
_lowerCAmelCase =2
_lowerCAmelCase =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
_lowerCAmelCase =True
_lowerCAmelCase =self.model_tester.seq_length
_lowerCAmelCase =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__UpperCAmelCase ):
_lowerCAmelCase =outputs.decoder_attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__UpperCAmelCase ):
_lowerCAmelCase =[t.numpy() for t in outputs.encoder_attentions]
_lowerCAmelCase =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_lowerCAmelCase =True
_lowerCAmelCase =False
_lowerCAmelCase =False
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_lowerCAmelCase =len(__UpperCAmelCase )
self.assertEqual(config.output_hidden_states , __UpperCAmelCase )
check_encoder_attentions_output(__UpperCAmelCase )
if self.is_encoder_decoder:
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCAmelCase )
check_decoder_attentions_output(__UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCAmelCase =True
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCAmelCase )
check_encoder_attentions_output(__UpperCAmelCase )
# Check attention is always last and order is fine
_lowerCAmelCase =True
_lowerCAmelCase =True
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =model(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCAmelCase )
check_encoder_attentions_output(__UpperCAmelCase )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> str:
        # TODO: Head-masking not yet implemented
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
__A = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
_lowerCAmelCase =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_lowerCAmelCase =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_lowerCAmelCase =prepare_led_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =model(**__UpperCAmelCase )[0]
_lowerCAmelCase =(1, 10_24, 7_68)
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
_lowerCAmelCase =tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
_lowerCAmelCase =_long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_lowerCAmelCase =_long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_lowerCAmelCase =prepare_led_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =model(**__UpperCAmelCase )[0]
_lowerCAmelCase =(1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
_lowerCAmelCase =tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1e-3 , rtol=1e-3 )
| 341 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
__A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
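# --- Added sketch (illustration only) of how the "conll_score" reported by `evaluate`
# above is derived: the mean of the MUC, B-cubed and CEAFe F1 values, scaled to 0-100.
def _conll_score_sketch(muc_f1, bcub_f1, ceafe_f1):
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100


assert _conll_score_sketch(1.0, 1.0, 1.0) == 100.0  # perfect coreference -> 100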
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 341 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clipseg'] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
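# --- Added sketch (not from the original file): the availability probe guarded by the
# try/except blocks above, in miniature. `is_torch_available` in transformers is
# essentially a cached check like this one (the name below is hypothetical).
import importlib.util


def _is_available_sketch(package_name):
    return importlib.util.find_spec(package_name) is not None


if _is_available_sketch("torch"):
    pass  # safe to register the torch-only entries of the import structure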
| 341 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    '''simple docstring'''

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =ffn_dim
_lowerCAmelCase =activation_function
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Dict:
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_resize_token_embeddings()
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
_lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =(
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase ="""left"""
# use different length sentences to test batching
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase )
_lowerCAmelCase =inputs["""input_ids"""]
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
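# --- Added sketch (illustration only): what `padding_side = "left"` means for the
# batched-generation test above. Decoder-only models append tokens on the right, so the
# prompt must end at the right edge; padding goes on the left and is masked out.
# The token ids below are made up.
pad_id = 1
seq_a, seq_b = [5, 6], [5, 6, 7, 8]
width = max(len(seq_a), len(seq_b))
padded_ids = [[pad_id] * (width - len(s)) + s for s in (seq_a, seq_b)]
attention_mask = [[0] * (width - len(s)) + [1] * len(s) for s in (seq_a, seq_b)]
assert padded_ids == [[1, 1, 5, 6], [5, 6, 7, 8]]
assert attention_mask == [[0, 0, 1, 1], [1, 1, 1, 1]]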
| 341 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}
class CpmTokenizer(PreTrainedTokenizer):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_lowerCAmelCase =jieba
_lowerCAmelCase =str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowerCAmelCase ( self ) -> Dict:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =super()._decode(*__UpperCAmelCase , **__UpperCAmelCase )
_lowerCAmelCase =text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
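# --- Added sketch (illustration only) of the whitespace convention used by this
# tokenizer: spaces and newlines are mapped to the visible characters U+2582/U+2583
# before tokenization (the `str.maketrans` table built in `__init__`) and mapped back
# in `_decode` above.
_cpm_table = str.maketrans(" \n", "\u2582\u2583")
encoded = "你好 世界\n".translate(_cpm_table)
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "你好 世界\n"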
| 341 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str:
_lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase )
_lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_lowerCAmelCase =[]
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase ="""""".join(__UpperCAmelCase )
_lowerCAmelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
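# --- Added sketch (illustration only): unlike BERT ([CLS] A [SEP] B [SEP]), XLNet puts
# the classification token at the END of the sequence, exactly as
# `build_inputs_with_special_tokens` above does: A <sep> B <sep> <cls>.
def _xlnet_layout_sketch(ids_a, ids_b, sep_id, cls_id):
    return ids_a + [sep_id] + ids_b + [sep_id] + [cls_id]


assert _xlnet_layout_sketch([10, 11], [20], sep_id=4, cls_id=3) == [10, 11, 4, 20, 4, 3]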
| 341 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    x = ((x & mask) != 0).float()
    bits = rearrange(x, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
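# --- Added sketch (illustration only): the two helpers above are inverses up to 8-bit
# quantization, which is what lets the pipeline diffuse images in {-1, 1} bit space.
def _bit_round_trip_sketch():
    image = torch.rand(1, 3, 4, 4)  # values in [0, 1]
    restored = bits_to_decimal(decimal_to_bits(image))
    # equal up to the 1/255 quantization introduced by the integer round-trip
    assert torch.allclose(image, restored, atol=1 / 255)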
def _lowerCamelCase(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0.0 , __UpperCamelCase = True , __UpperCamelCase=None , __UpperCamelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_lowerCAmelCase =timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_lowerCAmelCase =self.alphas_cumprod[timestep]
_lowerCAmelCase =self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_lowerCAmelCase =1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_lowerCAmelCase =self.bit_scale
if self.config.clip_sample:
_lowerCAmelCase =torch.clamp(__UpperCamelCase , -scale , __UpperCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_lowerCAmelCase =self._get_variance(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_lowerCAmelCase =(sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase =(1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase =alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_lowerCAmelCase =model_output.device if torch.is_tensor(__UpperCamelCase ) else """cpu"""
_lowerCAmelCase =torch.randn(model_output.shape , dtype=model_output.dtype , generator=__UpperCamelCase ).to(__UpperCamelCase )
_lowerCAmelCase =self._get_variance(__UpperCamelCase , __UpperCamelCase ) ** 0.5 * eta * noise
_lowerCAmelCase =prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
def _lowerCamelCase(self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="epsilon" , __UpperCamelCase=None , __UpperCamelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
_lowerCAmelCase =timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_lowerCAmelCase , _lowerCAmelCase =torch.split(__UpperCamelCase , sample.shape[1] , dim=1 )
else:
_lowerCAmelCase =None
# 1. compute alphas, betas
_lowerCAmelCase =self.alphas_cumprod[t]
_lowerCAmelCase =self.alphas_cumprod[t - 1] if t > 0 else self.one
_lowerCAmelCase =1 - alpha_prod_t
_lowerCAmelCase =1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_lowerCAmelCase =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_lowerCAmelCase =model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
_lowerCAmelCase =self.bit_scale
if self.config.clip_sample:
_lowerCAmelCase =torch.clamp(__UpperCamelCase , -scale , __UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCAmelCase =(alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_lowerCAmelCase =self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCAmelCase =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCAmelCase =0
if t > 0:
_lowerCAmelCase =torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=__UpperCamelCase ).to(model_output.device )
_lowerCAmelCase =(self._get_variance(__UpperCamelCase , predicted_variance=__UpperCamelCase ) ** 0.5) * noise
_lowerCAmelCase =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=__UpperCamelCase , pred_original_sample=__UpperCamelCase )
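# Note: the pipeline class below binds one of these two step functions onto the
# scheduler by name (`ddim_bit_scheduler_step` / `ddpm_bit_scheduler_step`), so
# the clipped "predicted x_0" is bounded by `bit_scale` rather than the usual
# [-1, 1] sample range.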
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1.0 , ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =bit_scale
_lowerCAmelCase =(
ddim_bit_scheduler_step if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 2_56 , __UpperCAmelCase = 2_56 , __UpperCAmelCase = 50 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> Union[Tuple, ImagePipelineOutput]:
_lowerCAmelCase =torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=__UpperCAmelCase , )
_lowerCAmelCase =decimal_to_bits(__UpperCAmelCase ) * self.bit_scale
_lowerCAmelCase =latents.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_lowerCAmelCase =self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase =self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
_lowerCAmelCase =bits_to_decimal(__UpperCAmelCase )
if output_type == "pil":
_lowerCAmelCase =self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
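# Usage sketch (illustrative only: the checkpoint path is a placeholder, not a
# published model; `BitDiffusion` is the name this masked pipeline class carries
# in diffusers' community examples):
#
#     unet = UNetaDConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # hypothetical weights
#     scheduler = DDIMScheduler()
#     pipe = BitDiffusion(unet, scheduler, 1.0)  # unet, scheduler, bit_scale
#     images = pipe(64, 64, 50).images           # height, width, num_inference_steps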
| 341 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase ) -> bool:
_lowerCAmelCase =str(__UpperCamelCase )
return n == n[::-1]
def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> int:
_lowerCAmelCase =0
for i in range(1 , __UpperCamelCase ):
if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ):
total += i
return total
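# Worked example: 585 is a palindrome in base 10 and its binary form 1001001001
# is also palindromic, so it is counted (assuming the predicate keeps its
# call-site name `is_palindrome`, as used in the loop above):
#
#     >>> is_palindrome(585), bin(585).split("b")[1]
#     (True, '1001001001')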
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 341 | 1 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__A = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__A = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
__A = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
def remove_articles(__UpperCamelCase ):
_lowerCAmelCase =re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
return re.sub(__UpperCamelCase , """ """ , __UpperCamelCase )
def white_space_fix(__UpperCamelCase ):
return " ".join(text.split() )
def remove_punc(__UpperCamelCase ):
_lowerCAmelCase =set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
return int(normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> float:
_lowerCAmelCase =[any(compute_exact(__UpperCamelCase , __UpperCamelCase ) for ref in refs ) for pred, refs in zip(__UpperCamelCase , __UpperCamelCase )]
return (sum(__UpperCamelCase ) / len(__UpperCamelCase )) * 100
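# Quick sketch of the exact-match helpers above, using the call-site names from
# `_compute` below (`compute_exact` / `compute_em`):
#
#     >>> compute_exact("The Cat!", "the cat")         # case, punctuation and articles ignored
#     1
#     >>> compute_em(["a b", "c"], [["a  b"], ["d"]])  # one of two predictions matches a reference
#     50.0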
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
_lowerCAmelCase =[rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCAmelCase =Counter(__UpperCamelCase )
_lowerCAmelCase =Counter(__UpperCamelCase )
_lowerCAmelCase =Counter()
for sgram, scount in sgramcounter.items():
_lowerCAmelCase =scount * numref
_lowerCAmelCase =Counter(__UpperCamelCase )
_lowerCAmelCase =Counter()
for cgram, ccount in cgramcounter.items():
_lowerCAmelCase =ccount * numref
# KEEP
_lowerCAmelCase =sgramcounter_rep & cgramcounter_rep
_lowerCAmelCase =keepgramcounter_rep & rgramcounter
_lowerCAmelCase =sgramcounter_rep & rgramcounter
_lowerCAmelCase =0
_lowerCAmelCase =0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase =1
_lowerCAmelCase =1
if len(__UpperCamelCase ) > 0:
_lowerCAmelCase =keeptmpscorea / len(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCAmelCase =keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCAmelCase =0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCAmelCase =2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCAmelCase =sgramcounter_rep - cgramcounter_rep
_lowerCAmelCase =delgramcounter_rep - rgramcounter
_lowerCAmelCase =sgramcounter_rep - rgramcounter
_lowerCAmelCase =0
_lowerCAmelCase =0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase =1
if len(__UpperCamelCase ) > 0:
_lowerCAmelCase =deltmpscorea / len(__UpperCamelCase )
# ADDITION
_lowerCAmelCase =set(__UpperCamelCase ) - set(__UpperCamelCase )
_lowerCAmelCase =set(__UpperCamelCase ) & set(__UpperCamelCase )
_lowerCAmelCase =set(__UpperCamelCase ) - set(__UpperCamelCase )
_lowerCAmelCase =0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase =1
_lowerCAmelCase =1
if len(__UpperCamelCase ) > 0:
_lowerCAmelCase =addtmpscore / len(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_lowerCAmelCase =addtmpscore / len(__UpperCamelCase )
_lowerCAmelCase =0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCAmelCase =2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
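# Intuition for the three scores above: with source "a b", prediction "a c" and
# one reference "a d", the unigram scores reward keeping "a" (kept and present in
# the reference), credit deleting "b" (absent from the reference), and give zero
# addition credit for the spurious "c".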
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
_lowerCAmelCase =len(__UpperCamelCase )
_lowerCAmelCase =ssent.split(""" """ )
_lowerCAmelCase =csent.split(""" """ )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for rsent in rsents:
_lowerCAmelCase =rsent.split(""" """ )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
ragramslist.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
_lowerCAmelCase =ragrams[i] + """ """ + ragrams[i + 1]
ragrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
_lowerCAmelCase =ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2]
ragrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
_lowerCAmelCase =ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3]
ragrams.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
ragramslist.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
_lowerCAmelCase =sagrams[i] + """ """ + sagrams[i + 1]
sagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
_lowerCAmelCase =sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2]
sagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
_lowerCAmelCase =sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3]
sagrams.append(__UpperCamelCase )
for i in range(0 , len(__UpperCamelCase ) - 1 ):
if i < len(__UpperCamelCase ) - 1:
_lowerCAmelCase =cagrams[i] + """ """ + cagrams[i + 1]
cagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 2:
_lowerCAmelCase =cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2]
cagrams.append(__UpperCamelCase )
if i < len(__UpperCamelCase ) - 3:
_lowerCAmelCase =cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3]
cagrams.append(__UpperCamelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =SARIngram(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCAmelCase =sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCAmelCase =sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCAmelCase =(avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = "13a" , __UpperCamelCase = True ) -> str:
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
_lowerCAmelCase =sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
_lowerCAmelCase =sacrebleu.metrics.bleu._get_tokenizer(__UpperCamelCase )()(__UpperCamelCase )
else:
_lowerCAmelCase =sacrebleu.TOKENIZERS[tokenizer]()(__UpperCamelCase )
elif tokenizer == "moses":
_lowerCAmelCase =sacremoses.MosesTokenizer().tokenize(__UpperCamelCase , return_str=__UpperCamelCase , escape=__UpperCamelCase )
elif tokenizer == "penn":
_lowerCAmelCase =sacremoses.MosesTokenizer().penn_tokenize(__UpperCamelCase , return_str=__UpperCamelCase )
else:
_lowerCAmelCase =sentence
if not return_str:
_lowerCAmelCase =normalized_sent.split()
return normalized_sent
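# Normalization sketch (assuming the call-site name `normalize` used by the SARI
# driver below; output shown for the default sacrebleu "13a" tokenizer, which
# splits off trailing punctuation):
#
#     >>> normalize("About 95 species are currently accepted.")
#     'about 95 species are currently accepted .'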
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
if not (len(__UpperCamelCase ) == len(__UpperCamelCase ) == len(__UpperCamelCase )):
raise ValueError("""Sources length must match predictions and references lengths.""" )
_lowerCAmelCase =0
for src, pred, refs in zip(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
sari_score += SARIsent(normalize(__UpperCamelCase ) , normalize(__UpperCamelCase ) , [normalize(__UpperCamelCase ) for sent in refs] )
_lowerCAmelCase =sari_score / len(__UpperCamelCase )
return 100 * sari_score
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase="exp" , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , ) -> float:
_lowerCAmelCase =len(references[0] )
if any(len(__UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
_lowerCAmelCase =[[refs[i] for refs in references] for i in range(__UpperCamelCase )]
_lowerCAmelCase =sacrebleu.corpus_bleu(
__UpperCamelCase , __UpperCamelCase , smooth_method=__UpperCamelCase , smooth_value=__UpperCamelCase , force=__UpperCamelCase , lowercase=__UpperCamelCase , use_effective_order=__UpperCamelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_lowerCAmelCase ={}
result.update({"""sari""": compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({"""sacrebleu""": compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
result.update({"""exact""": compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} )
return result
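# Usage sketch for the combined metric, mirroring the docstring example above
# (assumes this file is registered under the metric name "wiki_split"):
#
#     wiki_split = datasets.load_metric("wiki_split")
#     results = wiki_split.compute(
#         sources=["About 95 species are currently accepted ."],
#         predictions=["About 95 you now get in ."],
#         references=[["About 95 species are currently known ."]],
#     )  # -> {'sari': ..., 'sacrebleu': ..., 'exact': ...}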
| 341 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''llama'''
lowerCamelCase = ['''past_key_values''']
def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =num_key_value_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =initializer_range
_lowerCAmelCase =rms_norm_eps
_lowerCAmelCase =pretraining_tp
_lowerCAmelCase =use_cache
_lowerCAmelCase =rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase )
_lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
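# Usage sketch: a valid RoPE-scaling dict passes the validation above, while an
# out-of-range factor raises (assumes the class maps to the standard
# `LlamaConfig`, which this masked definition mirrors):
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # ok
#     LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})           # ValueError: factor must be > 1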
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
if len(__UpperCamelCase ) < k or k < 0:
raise ValueError("""Invalid Input""" )
_lowerCAmelCase =_lowerCAmelCase =sum(array[:k] )
for i in range(len(__UpperCamelCase ) - k ):
_lowerCAmelCase =current_sum - array[i] + array[i + k]
_lowerCAmelCase =max(__UpperCamelCase , __UpperCamelCase )
return max_sum
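# Worked example of the O(n) sliding window (assuming the function keeps its
# call-site name `max_sum_in_array`, as the __main__ block below uses):
#
#     >>> max_sum_in_array([1, 4, 2, 10, 2], 2)  # window sums: 5, 6, 12, 12
#     12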
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__A = [randint(-1000, 1000) for i in range(100)]
__A = randint(0, 110)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 341 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
# warning at import time
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _lowerCamelCase(__UpperCamelCase ) -> list[list[float]]:
_lowerCAmelCase =Decimal
    # Check if the provided matrix has 2 rows and 2 columns,
    # since this branch handles the 2x2 case (the 3x3 case follows below)
if len(__UpperCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_lowerCAmelCase =float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
_lowerCAmelCase =[[0.0, 0.0], [0.0, 0.0]]
_lowerCAmelCase , _lowerCAmelCase =matrix[1][1], matrix[0][0]
_lowerCAmelCase , _lowerCAmelCase =-matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(__UpperCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(__UpperCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
        # Calculate the determinant of the matrix using Sarrus' rule
_lowerCAmelCase =float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
_lowerCAmelCase =[
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_lowerCAmelCase =(d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_lowerCAmelCase =-(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_lowerCAmelCase =(d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_lowerCAmelCase =-(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_lowerCAmelCase =(d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_lowerCAmelCase =-(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_lowerCAmelCase =(d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_lowerCAmelCase =-(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_lowerCAmelCase =(d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_lowerCAmelCase =array(__UpperCamelCase )
for i in range(3 ):
for j in range(3 ):
_lowerCAmelCase =cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_lowerCAmelCase =array(__UpperCamelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(__UpperCamelCase )
# Calculate the inverse of the matrix
return [[float(d(__UpperCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
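# Usage sketch (assuming the function keeps its public name `inverse_of_matrix`
# from the original module). For [[2, 5], [1, 3]] the determinant is 1, so the
# inverse is just the swapped/negated matrix:
#
#     >>> inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]])
#     [[3.0, -5.0], [-1.0, 2.0]]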
| 341 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
        # create a hypothetical next token to extend next_input_ids with
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append it to next_input_ids
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 341 | 1 |
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
__A = 'src/transformers'
# Matches is_xxx_available()
__A = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__A = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__A = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__A = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__A = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__A = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__A = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__A = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__A = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__A = re.compile(r'^\s*try:')
# Catches a line with else:
__A = re.compile(r'^\s*else:')
def _lowerCamelCase(__UpperCamelCase ) -> str:
if _re_test_backend.search(__UpperCamelCase ) is None:
return None
_lowerCAmelCase =[b[0] for b in _re_backend.findall(__UpperCamelCase )]
backends.sort()
return "_and_".join(__UpperCamelCase )
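# Sketch of the backend parser above (assuming its call-site name `find_backend`):
# it returns None for ordinary lines and a sorted, "_and_"-joined backend string
# for `if not is_xxx_available()` guard lines.
#
#     >>> find_backend("    if not is_torch_available():")
#     'torch'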
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_lowerCAmelCase =f.readlines()
_lowerCAmelCase =0
while line_index < len(__UpperCamelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCamelCase ):
return None
# First grab the objects without a specific backend in _import_structure
_lowerCAmelCase =[]
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
_lowerCAmelCase =lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCamelCase ):
_lowerCAmelCase =_re_one_line_import_struct.search(__UpperCamelCase ).groups()[0]
_lowerCAmelCase =re.findall(R"""\[([^\]]+)\]""" , __UpperCamelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
_lowerCAmelCase =_re_import_struct_key_value.search(__UpperCamelCase )
if single_line_import_search is not None:
_lowerCAmelCase =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
_lowerCAmelCase ={"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_lowerCAmelCase =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCAmelCase =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCAmelCase =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
_lowerCAmelCase =lines[line_index]
if _re_import_struct_add_one.search(__UpperCamelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCamelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCamelCase ) is not None:
_lowerCAmelCase =_re_import_struct_add_many.search(__UpperCamelCase ).groups()[0].split(""", """ )
_lowerCAmelCase =[obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_between_brackets.search(__UpperCamelCase ) is not None:
_lowerCAmelCase =_re_between_brackets.search(__UpperCamelCase ).groups()[0].split(""", """ )
_lowerCAmelCase =[obj[1:-1] for obj in imports if len(__UpperCamelCase ) > 0]
objects.extend(__UpperCamelCase )
elif _re_quote_object.search(__UpperCamelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCamelCase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
_lowerCAmelCase =objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_lowerCAmelCase =[]
while (
line_index < len(__UpperCamelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
_lowerCAmelCase =lines[line_index]
_lowerCAmelCase =_re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
_lowerCAmelCase ={"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCamelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
_lowerCAmelCase =find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCAmelCase =None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCAmelCase =[]
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
_lowerCAmelCase =lines[line_index]
_lowerCAmelCase =_re_import.search(__UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
_lowerCAmelCase =objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> List[str]:
def find_duplicates(__UpperCamelCase ):
return [k for k, v in collections.Counter(__UpperCamelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_lowerCAmelCase =[]
for key in import_dict_objects.keys():
_lowerCAmelCase =find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_lowerCAmelCase =find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_lowerCAmelCase ="""base imports""" if key == """none""" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def _lowerCamelCase() -> Optional[int]:
_lowerCAmelCase =[]
for root, _, files in os.walk(__UpperCamelCase ):
if "__init__.py" in files:
_lowerCAmelCase =os.path.join(__UpperCamelCase , """__init__.py""" )
_lowerCAmelCase =parse_init(__UpperCamelCase )
if objects is not None:
_lowerCAmelCase =analyze_results(*__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_lowerCAmelCase =F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(__UpperCamelCase ) )
if len(__UpperCamelCase ) > 0:
raise ValueError("""\n\n""".join(__UpperCamelCase ) )
def _lowerCamelCase() -> Union[str, Any]:
_lowerCAmelCase =[]
for path, directories, files in os.walk(__UpperCamelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(__UpperCamelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__UpperCamelCase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
_lowerCAmelCase =str((Path(__UpperCamelCase ) / folder).relative_to(__UpperCamelCase ) )
_lowerCAmelCase =short_path.replace(os.path.sep , """.""" )
submodules.append(__UpperCamelCase )
for fname in files:
if fname == "__init__.py":
continue
_lowerCAmelCase =str((Path(__UpperCamelCase ) / fname).relative_to(__UpperCamelCase ) )
_lowerCAmelCase =short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(__UpperCamelCase )
return submodules
__A = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def _lowerCamelCase() -> Any:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_lowerCAmelCase =direct_transformers_import(__UpperCamelCase )
_lowerCAmelCase =set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-) add them.
with open(os.path.join(__UpperCamelCase , """__init__.py""" ) , """r""" ) as f:
_lowerCAmelCase =f.read()
import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , __UpperCamelCase ) ) )
_lowerCAmelCase =[
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__UpperCamelCase ) > 0:
_lowerCAmelCase ="""\n""".join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            """The following submodules are not properly registered in the main init of Transformers:\n"""
F'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 341 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = JukeboxTokenizer
lowerCamelCase = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _lowerCAmelCase ( self ) -> str:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 341 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''gpt_neox'''
def __init__( self , __UpperCAmelCase=5_04_32 , __UpperCAmelCase=61_44 , __UpperCAmelCase=44 , __UpperCAmelCase=64 , __UpperCAmelCase=2_45_76 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.2_5 , __UpperCAmelCase=1_00_00 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Tuple:
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =rotary_pct
_lowerCAmelCase =rotary_emb_base
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =hidden_dropout
_lowerCAmelCase =classifier_dropout
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =use_cache
_lowerCAmelCase =tie_word_embeddings
_lowerCAmelCase =use_parallel_residual
_lowerCAmelCase =rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisible by the number of attention heads! Make sure to update them!""" )
def _lowerCAmelCase ( self ) -> Optional[int]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase )
_lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
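# Validation sketch, same RoPE-scaling contract as the Llama config above
# (assumes the class maps to the standard `GPTNeoXConfig`):
#
#     GPTNeoXConfig(rope_scaling={"type": "dynamic", "factor": 4.0})  # ok
#     GPTNeoXConfig(rope_scaling={"factor": 4.0})                     # ValueError: needs both `type` and `factor`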
| 341 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =monolingual_vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase ={}
_lowerCAmelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =cnt
cnt += 1
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase =line.strip().split()[0]
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
_lowerCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
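# Usage sketch (added for illustration; requires the full transformers package and
# network access — "vinai/bartpho-syllable" is the documented checkpoint from the map above):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     print(tokenizer.tokenize("Chúng tôi là những nghiên cứu viên."))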
| 341 | 1 |
"""simple docstring"""
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    # Discount each cash flow back to time zero and sum.
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows) )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
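# Illustrative check (added; the function name matches the definition above): an
# outflow of -100 at t=0 followed by inflows of 60 at t=1 and t=2, discounted at
# 10%, gives 60/1.1 + 60/1.21 - 100 ≈ 4.13.
if __name__ == "__main__":
    assert present_value(0.10, [-100, 60, 60]) == 4.13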
| 341 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        return CLIPTextModel(config )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
_lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 341 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
        'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FalconForCausalLM',
        'FalconModel',
        'FalconPreTrainedModel',
        'FalconForSequenceClassification',
        'FalconForTokenClassification',
        'FalconForQuestionAnswering',
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
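# Illustration (added): with the lazy-module pattern above, heavy submodules are
# only imported on first attribute access, e.g.:
#
#     from transformers.models.falcon import FalconConfig   # cheap, config only
#     from transformers.models.falcon import FalconModel    # triggers the torch import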
| 341 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """Configuration for the Convolutional vision Transformer (CvT); one value per stage."""
    model_type = 'cvt'
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1e-12 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
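# Usage sketch (added): constructing the config with defaults and overriding one
# stage-wise hyperparameter (every list in the signature holds one value per stage):
#
#     from transformers import CvtConfig
#     config = CvtConfig(depth=[1, 4, 16])
#     print(config.embed_dim, config.depth)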
| 341 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=50257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
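# Usage sketch (added): a smaller-than-default config for quick experiments; the
# attribute_map above lets the generic `hidden_size` name resolve to `n_embd`.
#
#     from transformers import GPTBigCodeConfig
#     config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
#     print(config.hidden_size)  # -> 128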
| 341 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-RoBERTa tokenizer into one processor."""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
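# Usage sketch (added; "BAAI/AltCLIP" is an assumed checkpoint that ships both a
# CLIP image processor and an XLM-R tokenizer — requires network access):
#
#     from transformers import AltCLIPProcessor
#     from PIL import Image
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
#     print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values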
| 341 | 1 |
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
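# Illustrative check (added): 37 in binary is 100101, and the sign prefix is kept.
if __name__ == "__main__":
    assert main("37") == "0b100101"
    assert main("-37") == "-0b100101"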
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swinv2'] = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = PyTorchBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase ="""sgugger/tiny-distilbert-classification"""
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""sshleifer/tiny-gpt2"""
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , torchscript=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase ="""sshleifer/tiny-gpt2"""
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , fpaa=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase ="""sshleifer/tiny-gpt2"""
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
# set architectures equal to `None`
_lowerCAmelCase =None
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase ="""sshleifer/tiny-gpt2"""
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase ="""sshleifer/tiny-gpt2"""
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase ="""sshleifer/tiny-gpt2"""
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""sshleifer/tinier_bart"""
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase ="""sshleifer/tiny-gpt2"""
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase ="""sshleifer/tinier_bart"""
_lowerCAmelCase =AutoConfig.from_pretrained(__UpperCAmelCase )
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase , configs=[config] )
_lowerCAmelCase =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase ="""sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(__UpperCAmelCase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(__UpperCAmelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(__UpperCAmelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(__UpperCAmelCase , """env.csv""" ) , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , """env.csv""" ) ).exists() )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ="""sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__UpperCAmelCase ):
self.assertTrue(hasattr(__UpperCAmelCase , """sequential""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """cumulative""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """current""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase =PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , """log.txt""" ) , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_lowerCAmelCase =PyTorchBenchmark(__UpperCAmelCase )
_lowerCAmelCase =benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , """log.txt""" ) ).exists() )
| 341 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split("." )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split("." )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0" , "norm1" )
        new_item = new_item.replace("in_layers.2" , "conv1" )
        new_item = new_item.replace("out_layers.0" , "norm2" )
        new_item = new_item.replace("out_layers.3" , "conv2" )
        new_item = new_item.replace("emb_layers.1" , "time_emb_proj" )
        new_item = new_item.replace("skip_connection" , "conv_shortcut" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight" , "group_norm.weight" )
        new_item = new_item.replace("norm.bias" , "group_norm.bias" )
        new_item = new_item.replace("proj_out.weight" , "proj_attn.weight" )
        new_item = new_item.replace("proj_out.bias" , "proj_attn.bias" )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"old": old_item, "new": new_item} )
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query, key, value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["query"]] = query.reshape(target_shape )
            checkpoint[path_map["key"]] = key.reshape(target_shape )
            checkpoint[path_map["value"]] = value.reshape(target_shape )
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
        new_path = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
        new_path = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"] , replacement["new"] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks )
    }
for i in range(1 , __UpperCamelCase ):
_lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
_lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''input_blocks.{i}.1''',
"""new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''input_blocks.{i}.1.qkv.bias''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
_lowerCAmelCase =middle_blocks[0]
_lowerCAmelCase =middle_blocks[1]
_lowerCAmelCase =middle_blocks[2]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
_lowerCAmelCase =i // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =i % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
_lowerCAmelCase ={}
for layer in output_block_layers:
_lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
_lowerCAmelCase =[layer_name]
if len(__UpperCamelCase ) > 1:
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
_lowerCAmelCase =[]
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''output_blocks.{i}.1''',
"""new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''output_blocks.{i}.1.qkv.bias''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
_lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
_lowerCAmelCase =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__A = parser.parse_args()
__A = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__A = json.loads(f.read())
__A = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__A = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
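# Example invocation of the script above (added; the paths are placeholders for
# your local checkpoint, config, and output directory):
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm_model.ckpt \
#       --config_file ./ldm_config.json \
#       --dump_path ./converted_model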
| 341 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1024,
    'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer from a plain-text vocabulary file (one token per line)."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def _convert_id_to_token( self , index ) -> str:
        return self._id_to_token.get(index , self.unk_token )
    def _convert_token_to_id( self , token ) -> int:
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def _tokenize( self , text , **kwargs ):
        return text.split()
    def get_vocab_size( self , with_added_tokens=False ) -> int:
        return len(self._id_to_token )
    def get_vocab( self ):
        return {token: i for i, token in enumerate(self.all_tokens )}
    def token_to_id( self , token ) -> int:
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )
    def id_to_token( self , index ) -> str:
        return self._id_to_token.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask
    def save_vocabulary( self , save_directory , filename_prefix ):
        vocab_file = os.path.join(save_directory , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(vocab_file , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def vocab_size( self ) -> int:
        return self.get_vocab_size(with_added_tokens=False )
    def _add_tokens( self , new_tokens , special_tokens = False ) -> int:
        return super()._add_tokens(new_tokens , special_tokens=True )
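# Usage sketch (added; the vocabulary below is illustrative, not the real ESM
# vocab — it only demonstrates the whitespace `_tokenize` above):
#
#     tok = EsmTokenizer("vocab.txt")      # plain-text file, one token per line
#     print(tok("L A G")["input_ids"])     # cls id + residue ids + eos id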
| 341 |
"""simple docstring"""
def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversions (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
    main()
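# Complexity note (added): count_inversions_bf is O(n^2), while the merge-sort
# based count_inversions_recursive is O(n log n). A minimal sanity check on
# random input, assuming the function names defined above:
if __name__ == "__main__":
    import random

    data = [random.randint(0, 100) for _ in range(200)]
    assert count_inversions_bf(data) == count_inversions_recursive(data)[1]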
| 341 | 1 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =features.copy() if features else default_expected_features
_lowerCAmelCase =(
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase =TextDatasetReader(__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase , split=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
if issubclass(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =text_path
elif issubclass(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =[text_path]
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_dataset(__UpperCamelCase , __UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=("train",) ) -> List[str]:
assert isinstance(__UpperCamelCase , __UpperCamelCase )
for split in splits:
_lowerCAmelCase =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCAmelCase =TextDatasetReader({"""train""": text_path} , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
_lowerCAmelCase =tmp_path / """cache"""
    # the text reader always yields a single "text" column; its default dtype is "string"
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =features.copy() if features else default_expected_features
_lowerCAmelCase =(
Features({feature: Value(__UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCAmelCase =TextDatasetReader({"""train""": text_path} , features=__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
if split:
_lowerCAmelCase ={split: text_path}
else:
_lowerCAmelCase ="""train"""
_lowerCAmelCase ={"""train""": text_path, """test""": text_path}
_lowerCAmelCase =tmp_path / """cache"""
_lowerCAmelCase ={"""text""": """string"""}
_lowerCAmelCase =TextDatasetReader(__UpperCamelCase , cache_dir=__UpperCamelCase ).read()
_check_text_datasetdict(__UpperCamelCase , __UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
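# Minimal usage sketch (the file name and helper are assumptions, not part of
# the test suite above): each line of a plain-text file becomes one row in a
# single "text" column.
def _example_read_text(tmp_path):
    sample = tmp_path / "sample.txt"
    sample.write_text("first line\nsecond line\nthird line\nfourth line\n")
    dataset = TextDatasetReader(str(sample), cache_dir=str(tmp_path / "cache")).read()
    assert dataset.column_names == ["text"]
    return dataset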
| 341 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 341 | 1 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__A = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
__A = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
__A = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(64, 64)
)
__A = tf.keras.preprocessing.image.img_to_array(test_image)
__A = np.expand_dims(test_image, axis=0)
__A = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__A = 'Normal'
if result[0][0] == 1:
__A = 'Abnormality detected'
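# Helper sketch (an assumption, not in the original script): the final layer
# is a sigmoid, so thresholding at 0.5 is the usual way to map result[0][0]
# to a label instead of comparing it to exactly 0 or 1.
def label_from_sigmoid(probability, threshold=0.5):
    return 'Abnormality detected' if probability >= threshold else 'Normal'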
| 341 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def _lowerCamelCase() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
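# The same tuple-count trick sketched for a NOR gate (an illustration added
# here, not part of the original file): count the 1s, then invert the OR.
def nor_gate_sketch(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(1) == 0)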
| 341 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
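# A rough, self-contained sketch (an assumption, heavily simplified) of the
# deferral _LazyModule provides: attribute access triggers the real import
# only when a symbol is first needed.
import importlib

class _TinyLazyModule:
    def __init__(self, package_name, import_structure):
        self._package_name = package_name
        # Map every exported attribute back to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self._package_name)
        return getattr(submodule, attr)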
| 341 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
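# Standalone sketch (an assumption, smoothing omitted) of what the metric
# below computes: clipped n-gram precisions combined by a geometric mean and
# scaled by a brevity penalty for short candidates.
import math
from collections import Counter

def _bleu_sketch(candidate, reference, max_order=4):
    precisions = []
    for n in range(1, max_order + 1):
        cand_ngrams = Counter(tuple(candidate[i : i + n]) for i in range(len(candidate) - n + 1))
        ref_ngrams = Counter(tuple(reference[i : i + n]) for i in range(len(reference) - n + 1))
        overlap = sum((cand_ngrams & ref_ngrams).values())  # clipped n-gram matches
        precisions.append(overlap / max(sum(cand_ngrams.values()), 1))
    if min(precisions) == 0:
        return 0.0
    geo_mean = math.exp(sum(math.log(p) for p in precisions) / max_order)
    brevity_penalty = 1.0 if len(candidate) > len(reference) else math.exp(1 - len(reference) / max(len(candidate), 1))
    return brevity_penalty * geo_mean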
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple:
_lowerCAmelCase =compute_bleu(
reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 341 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A = 16
__A = 32
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase = 16 , __UpperCamelCase = "bert-base-cased" ) -> Optional[int]:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__UpperCamelCase )
_lowerCAmelCase =load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCAmelCase =datasets.map(
__UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
_lowerCAmelCase =DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
_lowerCAmelCase =DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
model.eval()
_lowerCAmelCase =0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase =model(**__UpperCamelCase )
_lowerCAmelCase =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowerCAmelCase , _lowerCAmelCase =accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCamelCase ) - 1:
_lowerCAmelCase =predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCAmelCase =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCamelCase , references=__UpperCamelCase , )
_lowerCAmelCase =metric.compute()
return eval_metric["accuracy"]
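# Standalone sketch (an assumption, not called above) of the same dedup rule:
# distributed samplers pad the final batch, so after gathering we keep only
# enough rows to reach the true dataset length.
def _drop_gather_padding(gathered_rows, dataset_len, samples_seen):
    overflow = samples_seen + len(gathered_rows) - dataset_len
    if overflow > 0:
        return gathered_rows[: len(gathered_rows) - overflow]
    return gathered_rows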
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Tuple:
# Initialize accelerator
_lowerCAmelCase =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase =config["""lr"""]
_lowerCAmelCase =int(config["""num_epochs"""] )
_lowerCAmelCase =int(config["""seed"""] )
_lowerCAmelCase =int(config["""batch_size"""] )
_lowerCAmelCase =args.model_name_or_path
set_seed(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =get_dataloaders(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase =AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase , return_dict=__UpperCamelCase )
# Instantiate optimizer
_lowerCAmelCase =(
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCAmelCase =optimizer_cls(params=model.parameters() , lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
_lowerCAmelCase =accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_lowerCAmelCase =1
_lowerCAmelCase =(len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCAmelCase =get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase , num_warmup_steps=0 , num_training_steps=__UpperCamelCase , )
else:
_lowerCAmelCase =DummyScheduler(__UpperCamelCase , total_num_steps=__UpperCamelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase =0
# We also need to keep track of the stating epoch so files are named properly
_lowerCAmelCase =0
_lowerCAmelCase =evaluate.load("""glue""" , """mrpc""" )
_lowerCAmelCase =num_epochs
if args.partial_train_epoch is not None:
_lowerCAmelCase =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_lowerCAmelCase =args.resume_from_checkpoint.split("""epoch_""" )[1]
_lowerCAmelCase =""""""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_lowerCAmelCase =int(__UpperCamelCase ) + 1
_lowerCAmelCase =evaluation_loop(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
accelerator.print("""resumed checkpoint performance:""" , __UpperCamelCase )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , """r""" ) as f:
_lowerCAmelCase =json.load(__UpperCamelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_lowerCAmelCase ={}
for epoch in range(__UpperCamelCase , __UpperCamelCase ):
model.train()
for step, batch in enumerate(__UpperCamelCase ):
_lowerCAmelCase =model(**__UpperCamelCase )
_lowerCAmelCase =outputs.loss
_lowerCAmelCase =loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_lowerCAmelCase =F'''epoch_{epoch}'''
_lowerCAmelCase =os.path.join(args.output_dir , __UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
_lowerCAmelCase =evaluation_loop(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =accuracy
_lowerCAmelCase =lr_scheduler.get_lr()[0]
_lowerCAmelCase =optimizer.param_groups[0]["""lr"""]
_lowerCAmelCase =epoch
_lowerCAmelCase =overall_step
accelerator.print(F'''epoch {epoch}:''' , __UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
def _lowerCamelCase() -> Any:
    _lowerCAmelCase =argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resumption.""" )
parser.add_argument(
"""--model_name_or_path""" , type=__UpperCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__UpperCamelCase , )
parser.add_argument(
"""--output_dir""" , type=__UpperCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=__UpperCamelCase , default=2 , help="""Number of train epochs.""" , )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase ={"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 341 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__A = parser.parse_args()
__A = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
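# Example invocation sketch (the script name and all paths are placeholders,
# not taken from this repository):
# python convert_controlnet_checkpoint.py \
#     --checkpoint_path ./controlnet.ckpt \
#     --original_config_file ./cldm_v15.yaml \
#     --dump_path ./controlnet-converted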
| 341 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__A = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['PerceiverFeatureExtractor']
__A = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def _lowerCamelCase() -> Any:
_lowerCAmelCase =ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
_lowerCAmelCase =parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__UpperCamelCase )
DownloadCommand.register_subcommand(__UpperCamelCase )
EnvironmentCommand.register_subcommand(__UpperCamelCase )
RunCommand.register_subcommand(__UpperCamelCase )
ServeCommand.register_subcommand(__UpperCamelCase )
UserCommands.register_subcommand(__UpperCamelCase )
AddNewModelCommand.register_subcommand(__UpperCamelCase )
AddNewModelLikeCommand.register_subcommand(__UpperCamelCase )
LfsCommands.register_subcommand(__UpperCamelCase )
PTtoTFCommand.register_subcommand(__UpperCamelCase )
# Let's go
_lowerCAmelCase =parser.parse_args()
if not hasattr(__UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
_lowerCAmelCase =args.func(__UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
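# Rough sketch (an assumption, simplified) of the contract every command above
# fulfils: register a subparser and bind a factory to `args.func`, so `main`
# can instantiate the service and call `run()`.
class _ExampleCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("example", help="demo subcommand")
        parser.set_defaults(func=lambda args: _ExampleCommand())

    def run(self):
        print("running example command")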
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = RoFormerTokenizer
lowerCamelCase = RoFormerTokenizerFast
lowerCamelCase = True
lowerCamelCase = True
def _lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__UpperCAmelCase )
def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Dict:
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ="""永和服装饰品有限公司,今天天气非常好"""
_lowerCAmelCase ="""永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase , _lowerCAmelCase =self.get_chinese_input_output_texts()
_lowerCAmelCase =tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
_lowerCAmelCase =tokens + [tokenizer.unk_token]
_lowerCAmelCase =[2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase =self.get_rust_tokenizer()
_lowerCAmelCase , _lowerCAmelCase =self.get_chinese_input_output_texts()
_lowerCAmelCase =tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , output_text.split() )
_lowerCAmelCase =tokens + [tokenizer.unk_token]
_lowerCAmelCase =[2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[int]:
pass
def _lowerCAmelCase ( self ) -> int:
pass
def _lowerCAmelCase ( self ) -> str:
pass
| 341 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__A = datasets.logging.get_logger(__name__)
__A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
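# Minimal sketch (an assumption, not part of CoVal) of extracting the only
# columns this wrapper uses from a space-separated CoNLL line: word (column 4),
# POS (column 5), the parse bit (column 6) and the trailing coreference column.
def _split_conll_columns(line):
    columns = line.split()
    word, pos, parse_bit = columns[3], columns[4], columns[5]
    coreference = columns[-1]
    return word, pos, parse_bit, coreference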
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict:
_lowerCAmelCase ={doc: key_lines}
_lowerCAmelCase ={doc: sys_lines}
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
if remove_nested:
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
_lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
for name, metric in metrics:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_lowerCAmelCase =(conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
_lowerCAmelCase =False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
_lowerCAmelCase =line.split()[5]
if not parse_col == "-":
_lowerCAmelCase =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =[
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowerCAmelCase =evaluate(
key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , )
return score
| 341 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=[10, 20, 30, 40] , __UpperCAmelCase=[2, 2, 3, 2] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=["stage2", "stage3", "stage4"] , __UpperCAmelCase=3 , __UpperCAmelCase=None , ) -> str:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =num_stages
_lowerCAmelCase =hidden_sizes
_lowerCAmelCase =depths
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =out_features
_lowerCAmelCase =num_labels
_lowerCAmelCase =scope
_lowerCAmelCase =num_stages
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self ) -> int:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _lowerCAmelCase ( self ) -> Dict:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=__UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=__UpperCAmelCase , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase =UperNetForSemanticSegmentation(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowerCAmelCase =model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.prepare_config_and_inputs()
        ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =config_and_inputs
_lowerCAmelCase ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCamelCase = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =UperNetModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def _lowerCAmelCase ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self ) -> str:
return
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__UpperCAmelCase )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _lowerCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _lowerCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _lowerCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _lowerCAmelCase ( self ) -> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _lowerCAmelCase ( self ) -> Any:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ) -> Optional[int]:
pass
def _lowerCAmelCase ( self ) -> Union[str, Any]:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowerCAmelCase =model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_lowerCAmelCase =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase =self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =_config_zero_init(__UpperCAmelCase )
_lowerCAmelCase =_config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(config=__UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _lowerCAmelCase ( self ) -> Optional[int]:
pass
@slow
def _lowerCAmelCase ( self ) -> List[str]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =UperNetForSemanticSegmentation.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def _lowerCamelCase() -> Dict:
_lowerCAmelCase =hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
_lowerCAmelCase =Image.open(__UpperCamelCase ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase =AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_lowerCAmelCase =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(__UpperCAmelCase )
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
with torch.no_grad():
_lowerCAmelCase =model(**__UpperCAmelCase )
_lowerCAmelCase =torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_lowerCAmelCase =torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_lowerCAmelCase =UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(__UpperCAmelCase )
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
with torch.no_grad():
_lowerCAmelCase =model(**__UpperCAmelCase )
_lowerCAmelCase =torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_lowerCAmelCase =torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 341 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = XGLMConfig
lowerCamelCase = {}
lowerCamelCase = '''gelu'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =ffn_dim
_lowerCAmelCase =activation_function
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Dict:
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =self.get_config()
_lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCAmelCase ( self ) -> str:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =TFXGLMModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )
def _lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_resize_token_embeddings()
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
_lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =(
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase ="""left"""
# use different length sentences to test batching
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase )
_lowerCAmelCase =inputs["""input_ids"""]
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
| 341 | 1 |
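# Minimal sketch (illustrative only; helper and ids are hypothetical): the batched
# generation test above pads on the left so the last position of every row is a real
# token, which is what a causal LM conditions on when producing the next token.
def pad_batch(sequences, pad_id=0, side="left"):
    width = max(len(s) for s in sequences)
    padded, masks = [], []
    for seq in sequences:
        pad = [pad_id] * (width - len(seq))
        if side == "left":
            padded.append(pad + seq)
            masks.append([0] * len(pad) + [1] * len(seq))
        else:
            padded.append(seq + pad)
            masks.append([1] * len(seq) + [0] * len(pad))
    return padded, masks

batch = [[5, 6, 7, 8], [9, 10]]
left_ids, left_mask = pad_batch(batch, side="left")
assert left_ids[1] == [0, 0, 9, 10]   # last slot holds a real token, safe to extend
right_ids, _ = pad_batch(batch, side="right")
assert right_ids[1] == [9, 10, 0, 0]  # last slot is padding, next-token logits misalign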
"""simple docstring"""
__A = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
__A = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
_lowerCAmelCase =from_type.lower().strip("""s""" )
_lowerCAmelCase =to_type.lower().strip("""s""" )
_lowerCAmelCase =UNIT_SYMBOL.get(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =UNIT_SYMBOL.get(__UpperCamelCase , __UpperCamelCase )
if from_sanitized not in METRIC_CONVERSION:
_lowerCAmelCase =(
F'''Invalid \'from_type\' value: {from_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(__UpperCamelCase )}'''
)
raise ValueError(__UpperCamelCase )
if to_sanitized not in METRIC_CONVERSION:
_lowerCAmelCase =(
F'''Invalid \'to_type\' value: {to_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(__UpperCamelCase )}'''
)
raise ValueError(__UpperCamelCase )
_lowerCAmelCase =METRIC_CONVERSION[from_sanitized]
_lowerCAmelCase =METRIC_CONVERSION[to_sanitized]
_lowerCAmelCase =1
if from_exponent > to_exponent:
_lowerCAmelCase =from_exponent - to_exponent
else:
_lowerCAmelCase =-(to_exponent - from_exponent)
return value * pow(10 , __UpperCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 341 |
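# Minimal sketch restating the conversion logic above with readable names: the value is
# scaled by 10 raised to the difference of the two units' base-10 exponents. The table
# below mirrors METRIC_CONVERSION from the snippet.
EXPONENTS = {"m": 0, "km": 3, "Mm": 6, "Gm": 9, "Tm": 12, "Pm": 15, "Em": 18, "Zm": 21, "Ym": 24}

def convert_length(value: float, from_unit: str, to_unit: str) -> float:
    return value * 10 ** (EXPONENTS[from_unit] - EXPONENTS[to_unit])

assert convert_length(1, "km", "m") == 1000
assert convert_length(1500, "m", "km") == 1.5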
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'spiece.model'}
__A = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__A = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__A = 0
__A = 1
__A = 2
__A = 3
__A = 4
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = '''left'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str:
_lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase )
_lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_lowerCAmelCase =[]
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase ="""""".join(__UpperCAmelCase )
_lowerCAmelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 341 | 1 |
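# Minimal sketch (pure Python, no sentencepiece needed; token ids are hypothetical
# placeholders): the XLNet-style tokenizer above appends its special tokens at the END of
# the sequence, `A <sep> <cls>` for one segment and `A <sep> B <sep> <cls>` for a pair,
# unlike BERT's leading [CLS].
SEP, CLS = 4, 3

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return ids_a + [SEP, CLS]
    return ids_a + [SEP] + ids_b + [SEP, CLS]

assert build_inputs([10, 11]) == [10, 11, SEP, CLS]
assert build_inputs([10], [20, 21]) == [10, SEP, 20, 21, SEP, CLS]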
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase ) -> bool:
_lowerCAmelCase =str(__UpperCamelCase )
return n == n[::-1]
def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> int:
_lowerCAmelCase =0
for i in range(1 , __UpperCamelCase ):
if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 341 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase ) -> bool:
_lowerCAmelCase =str(__UpperCamelCase )
return n == n[::-1]
def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> int:
_lowerCAmelCase =0
for i in range(1 , __UpperCamelCase ):
if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 341 | 1 |
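# Minimal sketch of the same Project Euler 36 logic with readable names: sum the numbers
# below the limit that are palindromic in both base 10 and base 2.
def is_palindrome(s: str) -> bool:
    return s == s[::-1]

def double_base_palindrome_sum(limit: int = 1_000_000) -> int:
    return sum(i for i in range(1, limit) if is_palindrome(str(i)) and is_palindrome(bin(i)[2:]))

# 1, 3, 5, 7, 9 read 1, 11, 101, 111, 1001 in binary, all palindromes
assert double_base_palindrome_sum(10) == 1 + 3 + 5 + 7 + 9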
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__A = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__A = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__A = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 |
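# Minimal sketch of the deferred-import pattern that `_LazyModule` implements above:
# heavy submodules are imported only on first attribute access, via a module-level
# __getattr__ (PEP 562). This standalone analogue uses importlib directly, and the
# name-to-module table is illustrative; it works when placed in a module file.
import importlib

_SUBMODULE_FOR_NAME = {"sqrt": "math", "dumps": "json"}

def __getattr__(name):
    if name in _SUBMODULE_FOR_NAME:
        module = importlib.import_module(_SUBMODULE_FOR_NAME[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")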
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''llama'''
lowerCamelCase = ['''past_key_values''']
def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =num_key_value_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =initializer_range
_lowerCAmelCase =rms_norm_eps
_lowerCAmelCase =pretraining_tp
_lowerCAmelCase =use_cache
_lowerCAmelCase =rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase )
_lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 341 | 1 |
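# Minimal sketch of the `rope_scaling` rules enforced above, as a standalone function
# (in the snippet the check lives on the config class): the field must be None or a
# two-key dict whose `type` is "linear"/"dynamic" and whose `factor` is a float > 1.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with fields `type` and `factor`, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes
validate_rope_scaling(None)                               # passes (feature disabled)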
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = MgpstrTokenizer
lowerCamelCase = False
lowerCamelCase = {}
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# fmt: off
_lowerCAmelCase =["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
_lowerCAmelCase =dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + """\n""" )
def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Any:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase ="""tester"""
_lowerCAmelCase ="""tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def _lowerCAmelCase ( self ) -> str:
pass
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.get_tokenizers(do_lower_case=__UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_lowerCAmelCase ="""[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_lowerCAmelCase =tokenizer.encode([special_token] , add_special_tokens=__UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , 1 )
_lowerCAmelCase =tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
_lowerCAmelCase , _lowerCAmelCase =self.get_input_output_texts(__UpperCAmelCase )
_lowerCAmelCase =tokenizer.tokenize(__UpperCAmelCase )
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
_lowerCAmelCase =tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertNotEqual(len(__UpperCAmelCase ) , 0 )
_lowerCAmelCase =tokenizer.decode(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(text_a.replace(""" """ , """""" ) , __UpperCAmelCase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def _lowerCAmelCase ( self ) -> str:
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def _lowerCAmelCase ( self ) -> List[str]:
pass
| 341 |
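# Minimal sketch of the character-level encode/decode round trip the MGP-STR tests above
# exercise, using a toy lowercase vocabulary (ids here are hypothetical):
vocab = {ch: i for i, ch in enumerate("abcdefghijklmnopqrstuvwxyz")}
inv_vocab = {i: ch for ch, i in vocab.items()}

def encode(text):
    return [vocab[ch] for ch in text.lower()]

def decode(ids):
    return "".join(inv_vocab[i] for i in ids)

assert decode(encode("tester")) == "tester"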
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
# warning at import time
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
| 341 | 1 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> None:
_lowerCAmelCase =[0] * len(__UpperCamelCase )
_lowerCAmelCase =[]
_lowerCAmelCase =[1] * len(__UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(__UpperCamelCase )
while queue:
_lowerCAmelCase =queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
_lowerCAmelCase =long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__UpperCamelCase )
print(max(__UpperCamelCase ) )
# Adjacency list of Graph
__A = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 341 |
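# Minimal sketch of the algorithm above with readable names: Kahn's topological order
# over a DAG, relaxing a longest-distance array (counted in vertices) as nodes are
# dequeued. Assumes every vertex appears as a key of the adjacency dict.
from collections import deque

def longest_path_length(graph):
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    dist = {v: 1 for v in graph}
    queue = deque(v for v in graph if indegree[v] == 0)
    while queue:
        v = queue.popleft()
        for t in graph[v]:
            dist[t] = max(dist[t], dist[v] + 1)
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return max(dist.values())

# same graph as above; the longest chain 0 -> 2 -> 5 -> 6 -> 7 touches 5 vertices
assert longest_path_length({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5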
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the hypothetical next token to input_ids
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 341 | 1 |
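# Minimal sketch, by analogy, of what the past-key-values test above verifies: running a
# sequence through in one shot must match feeding a prefix, caching its state, and then
# feeding only the new token. A running-sum "model" makes the equivalence concrete:
def full_pass(tokens):
    state, outputs = 0, []
    for t in tokens:
        state += t
        outputs.append(state)
    return outputs, state

def incremental_pass(new_token, cached_state):
    state = cached_state + new_token
    return state, state

prefix, next_token = [3, 1, 4], 1
full_outputs, _ = full_pass(prefix + [next_token])
_, cache = full_pass(prefix)
out_with_cache, _ = incremental_pass(next_token, cache)
assert out_with_cache == full_outputs[-1]  # cached path equals the from-scratch path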
"""simple docstring"""
__A = 'Input must be a string of 8 digits plus a letter'
__A = 'TRWAGMYFPDXBNJZSQVHLCKE'
def _lowerCamelCase(__UpperCamelCase ) -> bool:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =F'''Expected string as input, found {type(__UpperCamelCase ).__name__}'''
raise TypeError(__UpperCamelCase )
_lowerCAmelCase =spanish_id.replace("""-""" , """""" ).upper()
if len(__UpperCamelCase ) != 9:
raise ValueError(__UpperCamelCase )
try:
_lowerCAmelCase =int(spanish_id_clean[0:8] )
_lowerCAmelCase =spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__UpperCamelCase ) from ex
if letter.isdigit():
raise ValueError(__UpperCamelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
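# Minimal sketch of the DNI check-letter rule validated above: the control letter is the
# fixed lookup string indexed by the 8-digit number modulo 23.
LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"

def dni_check_letter(number: int) -> str:
    return LETTERS[number % 23]

assert dni_check_letter(12345678) == "Z"  # so "12345678Z" is a valid id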
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = JukeboxTokenizer
lowerCamelCase = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _lowerCAmelCase ( self ) -> str:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(__UpperCamelCase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , )
def _lowerCamelCase() -> None:
_lowerCAmelCase =[90, 23, 6, 33, 21, 65, 123, 34423]
_lowerCAmelCase =math.log(len(__UpperCamelCase ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 341 |
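# Minimal sketch of the same game-tree evaluation with readable names: leaves carry the
# scores, maximizer and minimizer alternate by depth, and the children of node i sit at
# indices 2i and 2i + 1 of an implicit complete binary tree.
def minimax_readable(depth, node, is_max, scores, height):
    if depth == height:
        return scores[node]
    left = minimax_readable(depth + 1, 2 * node, not is_max, scores, height)
    right = minimax_readable(depth + 1, 2 * node + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
assert minimax_readable(0, 0, True, scores, 3) == 65  # matches the printed optimal value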
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =monolingual_vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase ={}
_lowerCAmelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =cnt
cnt += 1
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase =line.strip().split()[0]
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
_lowerCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 341 | 1 |
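# Minimal sketch of the reduced-vocab construction above: special tokens are registered
# first, then each surface form from a dict.txt-style monolingual file gets the next free
# id, skipping duplicates. The file contents here are hypothetical:
def build_reduced_vocab(special_tokens, dict_lines):
    token_to_id = {}
    for token in special_tokens:
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
    for line in dict_lines:
        token = line.strip().split()[0]
        if token not in token_to_id:
            token_to_id[token] = len(token_to_id)
    return token_to_id

vocab = build_reduced_vocab(["<s>", "<pad>", "</s>", "<unk>"], ["xin 104", "chào 87", "xin 104"])
assert vocab == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "xin": 4, "chào": 5}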
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _lowerCamelCase(__UpperCamelCase ) -> List[Any]:
_lowerCAmelCase =os.path.join(args.tf_model_dir , """parameters.json""" )
_lowerCAmelCase =json.loads(open(__UpperCamelCase ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(""".pt""" ):
_lowerCAmelCase =args.output + """.pt"""
_lowerCAmelCase =OrderedDict()
with tf.device("""/CPU:0""" ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(__UpperCamelCase ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
_lowerCAmelCase =8
_lowerCAmelCase ="""model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/moe""" ):
_lowerCAmelCase =int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
_lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/softmlp/kernel""" ):
_lowerCAmelCase ="""model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(16 ):
_lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/mlp""" ):
_lowerCAmelCase =int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
_lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.wi.weight""" % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/p1/bias""" ):
_lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.wi.bias""" % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/p2/kernel""" ):
_lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.wo.weight""" % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/p2/bias""" ):
_lowerCAmelCase ="""model.blocks.%d.feed_forward.mlp.wo.bias""" % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/ln""" ):
_lowerCAmelCase =int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_lowerCAmelCase ="""model.blocks.%d.feed_forward.norm.bias""" % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/g""" ):
_lowerCAmelCase ="""model.blocks.%d.feed_forward.norm.weight""" % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/att""" ):
_lowerCAmelCase =int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ="""model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
_lowerCAmelCase ="""model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
_lowerCAmelCase ="""model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/o/kernel""" ):
_lowerCAmelCase ="""model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/an""" ):
_lowerCAmelCase =int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
_lowerCAmelCase ="""model.blocks.%d.self_attn.norm.bias""" % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.endswith("""/g""" ):
_lowerCAmelCase ="""model.blocks.%d.self_attn.norm.weight""" % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
_lowerCAmelCase ={"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
_lowerCAmelCase ="""model.%s.weight""" % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
if key_name.startswith("""model/wte""" ):
_lowerCAmelCase ="""lm_head.weight"""
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name.startswith("""model/wob""" ):
_lowerCAmelCase ="""final_logits_bias"""
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ="""model.last_project.weight"""
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ="""model.last_project.bias"""
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(__UpperCamelCase )
torch.save(__UpperCamelCase , args.output )
if __name__ == "__main__":
__A = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
__A = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 341 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
        _lowerCAmelCase =UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase =Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # two schedulers: DDPM adds noise to the low-res conditioning image, DDIM (v-prediction) runs the denoising loop
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
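        # the x4 upscaler returns images at four times the low-res input resolution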
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase =Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
        # two schedulers: DDPM adds noise to the low-res conditioning image, DDIM (v-prediction) runs the denoising loop
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase =Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =text_encoder.half()
        # two schedulers: DDPM adds noise to the low-res conditioning image, DDIM (v-prediction) runs the denoising loop
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.float16 , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.float16 , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
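        # attention slicing plus sequential CPU offload keep peak VRAM low enough for the assertion below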
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
_lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 341 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''beit'''
def __init__( self , __UpperCAmelCase=81_92 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , __UpperCAmelCase=2_24 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=True , __UpperCAmelCase=[3, 5, 7, 11] , __UpperCAmelCase=[1, 2, 3, 6] , __UpperCAmelCase=True , __UpperCAmelCase=0.4 , __UpperCAmelCase=2_56 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=2_55 , **__UpperCAmelCase , ) -> Any:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =image_size
_lowerCAmelCase =patch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =use_mask_token
_lowerCAmelCase =use_absolute_position_embeddings
_lowerCAmelCase =use_relative_position_bias
_lowerCAmelCase =use_shared_relative_position_bias
_lowerCAmelCase =layer_scale_init_value
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase =out_indices
_lowerCAmelCase =pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase =use_auxiliary_head
_lowerCAmelCase =auxiliary_loss_weight
_lowerCAmelCase =auxiliary_channels
_lowerCAmelCase =auxiliary_num_convs
_lowerCAmelCase =auxiliary_concat_input
_lowerCAmelCase =semantic_loss_ignore_index
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = version.parse('''1.11''' )
@property
def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowerCAmelCase ( self ) -> float:
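        # absolute tolerance used when validating exported ONNX models against the PyTorch outputs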
return 1e-4
| 341 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''cvt'''
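    # most hyperparameters below are 3-element lists, one entry per CvT stage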
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =num_channels
_lowerCAmelCase =patch_sizes
_lowerCAmelCase =patch_stride
_lowerCAmelCase =patch_padding
_lowerCAmelCase =embed_dim
_lowerCAmelCase =num_heads
_lowerCAmelCase =depth
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =attention_drop_rate
_lowerCAmelCase =drop_rate
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =qkv_bias
_lowerCAmelCase =cls_token
_lowerCAmelCase =qkv_projection_method
_lowerCAmelCase =kernel_qkv
_lowerCAmelCase =padding_kv
_lowerCAmelCase =stride_kv
_lowerCAmelCase =padding_q
_lowerCAmelCase =stride_q
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
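# A1Z26 cipher: maps a -> 1, b -> 2, ..., z -> 26 and back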
def _lowerCamelCase(__UpperCamelCase ) -> list[int]:
    return [ord(elem ) - 96 for elem in __UpperCamelCase]
def _lowerCamelCase(__UpperCamelCase ) -> str:
return "".join(chr(elem + 96 ) for elem in encoded )
def _lowerCamelCase() -> None:
_lowerCAmelCase =encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , __UpperCamelCase )
print("""Decoded:""" , decode(__UpperCamelCase ) )
if __name__ == "__main__":
main()
| 341 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''image_processor''', '''tokenizer''']
lowerCamelCase = '''CLIPImageProcessor'''
lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
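    # pairs a CLIP image processor with an XLM-RoBERTa tokenizer for multilingual image-text inputs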
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
_lowerCAmelCase =kwargs.pop("""feature_extractor""" )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 341 | 1 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> tuple[float | int, list[tuple[int, int]]]:
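    # Dijkstra on a 0/1 grid: cells equal to 1 are passable and every move costs 1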
_lowerCAmelCase , _lowerCAmelCase =grid.shape
_lowerCAmelCase =[-1, 1, 0, 0]
_lowerCAmelCase =[0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
_lowerCAmelCase , _lowerCAmelCase =[(0, source)], set()
_lowerCAmelCase =np.full((rows, cols) , np.inf )
_lowerCAmelCase =0
_lowerCAmelCase =np.empty((rows, cols) , dtype=__UpperCamelCase )
_lowerCAmelCase =None
while queue:
((_lowerCAmelCase) , (_lowerCAmelCase)) =heappop(__UpperCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
_lowerCAmelCase =[]
while (x, y) != source:
path.append((x, y) )
_lowerCAmelCase , _lowerCAmelCase =predecessors[x, y]
path.append(__UpperCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
        for i in range(len(dx ) ):
_lowerCAmelCase , _lowerCAmelCase =x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
_lowerCAmelCase =grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__UpperCamelCase , (dist + 1, (nx, ny)) )
_lowerCAmelCase =dist + 1
_lowerCAmelCase =(x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
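# lazy import structure: heavy optional submodules are only imported when first accessed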
__A = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['PerceiverFeatureExtractor']
__A = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=1024 , __UpperCamelCase=1024 , __UpperCamelCase=False , **__UpperCamelCase ) -> int:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__UpperCamelCase )
_lowerCAmelCase =SeqaSeqDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , type_path="""train""" , **__UpperCamelCase )
_lowerCAmelCase =tok.pad_token_id
def get_lens(__UpperCamelCase ):
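        # count non-pad tokens per example; with consider_target, keep max(source, target) length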
_lowerCAmelCase =tqdm(
DataLoader(__UpperCamelCase , batch_size=512 , num_workers=8 , shuffle=__UpperCamelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_lowerCAmelCase =[]
for batch in dl:
_lowerCAmelCase =batch["""input_ids"""].ne(__UpperCamelCase ).sum(1 ).tolist()
_lowerCAmelCase =batch["""labels"""].ne(__UpperCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__UpperCamelCase , __UpperCamelCase ):
max_lens.append(max(__UpperCamelCase , __UpperCamelCase ) )
else:
max_lens.extend(__UpperCamelCase )
return max_lens
_lowerCAmelCase =get_lens(__UpperCamelCase )
_lowerCAmelCase =SeqaSeqDataset(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , type_path="""val""" , **__UpperCamelCase )
_lowerCAmelCase =get_lens(__UpperCamelCase )
pickle_save(__UpperCamelCase , train_ds.len_file )
pickle_save(__UpperCamelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
__A = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.6_02_17_66_34E-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.355_818,
}
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
_lowerCAmelCase =(
F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            F'''Valid values are: {', '.join(ENERGY_CONVERSION )}'''
)
raise ValueError(__UpperCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple:
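    # n >= 0 drops the first n dot-separated segments of a checkpoint key; n < 0 drops the last |n|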
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]:
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" )
_lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" )
_lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" )
_lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" )
_lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
_lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple:
_lowerCAmelCase =[]
for old_item in old_list:
_lowerCAmelCase =old_item
_lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" )
_lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" )
_lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
_lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
_lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_lowerCAmelCase =old_checkpoint[path]
_lowerCAmelCase =old_tensor.shape[0] // 3
_lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3
_lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 )
_lowerCAmelCase =query.reshape(__UpperCamelCase )
_lowerCAmelCase =key.reshape(__UpperCamelCase )
_lowerCAmelCase =value.reshape(__UpperCamelCase )
for path in paths:
_lowerCAmelCase =path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
_lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
_lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0]
else:
_lowerCAmelCase =old_checkpoint[path["""old"""]]
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
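    # map time embedding and in/out convolutions first, then rename every down, mid and up block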
_lowerCAmelCase ={}
_lowerCAmelCase =checkpoint["""time_embed.0.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.0.bias"""]
_lowerCAmelCase =checkpoint["""time_embed.2.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.2.bias"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""]
_lowerCAmelCase =checkpoint["""out.0.weight"""]
_lowerCAmelCase =checkpoint["""out.0.bias"""]
_lowerCAmelCase =checkpoint["""out.2.weight"""]
_lowerCAmelCase =checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 , __UpperCamelCase ):
_lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
_lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''input_blocks.{i}.1''',
"""new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''input_blocks.{i}.1.qkv.bias''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
_lowerCAmelCase =middle_blocks[0]
_lowerCAmelCase =middle_blocks[1]
_lowerCAmelCase =middle_blocks[2]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
_lowerCAmelCase =i // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =i % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
_lowerCAmelCase ={}
for layer in output_block_layers:
_lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
_lowerCAmelCase =[layer_name]
if len(__UpperCamelCase ) > 1:
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
_lowerCAmelCase =[]
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''output_blocks.{i}.1''',
"""new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''output_blocks.{i}.1.qkv.bias''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
_lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
_lowerCAmelCase =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__A = parser.parse_args()
__A = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__A = json.loads(f.read())
__A = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
    __A = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 341 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Optional[int]:
_lowerCAmelCase ="""ZinengTang/tvlt-base"""
_lowerCAmelCase =tempfile.mkdtemp()
def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Tuple:
return TvltImageProcessor.from_pretrained(self.checkpoint , **__UpperCAmelCase )
def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Dict:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ) -> Dict:
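        # save/reload round trip: both component types must survive serialization to disk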
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_feature_extractor()
_lowerCAmelCase =TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase =TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , __UpperCAmelCase )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_feature_extractor()
_lowerCAmelCase =TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
_lowerCAmelCase =np.ones([1_20_00] )
_lowerCAmelCase =feature_extractor(__UpperCAmelCase , return_tensors="""np""" )
_lowerCAmelCase =processor(audio=__UpperCAmelCase , return_tensors="""np""" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_feature_extractor()
_lowerCAmelCase =TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
_lowerCAmelCase =np.ones([3, 2_24, 2_24] )
_lowerCAmelCase =image_processor(__UpperCAmelCase , return_tensors="""np""" )
_lowerCAmelCase =processor(images=__UpperCAmelCase , return_tensors="""np""" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_feature_extractor()
_lowerCAmelCase =TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
_lowerCAmelCase =np.ones([1_20_00] )
_lowerCAmelCase =np.ones([3, 2_24, 2_24] )
_lowerCAmelCase =processor(audio=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =self.get_image_processor()
_lowerCAmelCase =self.get_feature_extractor()
_lowerCAmelCase =TvltProcessor(image_processor=__UpperCAmelCase , feature_extractor=__UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
| 341 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
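    # brute force O(n^2): count index pairs i < j with arr[i] > arr[j]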
_lowerCAmelCase =0
_lowerCAmelCase =len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _lowerCamelCase(__UpperCamelCase ) -> List[Any]:
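    # divide and conquer O(n log n): count inversions while merge-sorting the array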
if len(__UpperCamelCase ) <= 1:
return arr, 0
_lowerCAmelCase =len(__UpperCamelCase ) // 2
_lowerCAmelCase =arr[0:mid]
_lowerCAmelCase =arr[mid:]
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any:
_lowerCAmelCase =[]
_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p),
            # because p is sorted. Every such pair is an inversion, so
            # count them all at once and advance j.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _lowerCamelCase() -> str:
_lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
# an empty list should also have zero inversions
_lowerCAmelCase =[]
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
if __name__ == "__main__":
main()
| 341 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]:
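    # renames for one stage's patch-embedding projection and its layer norm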
_lowerCAmelCase =[]
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict:
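    # renames for one transformer block: conv Q/K/V projections (with BN statistics), linear projections, MLP and layer norms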
_lowerCAmelCase =[]
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def _lowerCamelCase(__UpperCamelCase ) -> Any:
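    # only the final CvT stage carries a cls token, stored as stage2.cls_token in the original checkpoints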
_lowerCAmelCase =[]
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def _lowerCamelCase() -> Union[str, Any]:
_lowerCAmelCase =[]
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
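    # fetch the ImageNet-1k label mapping, size the config from the model name, then remap all weights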
_lowerCAmelCase ="""imagenet-1k-id2label.json"""
_lowerCAmelCase =1000
_lowerCAmelCase ="""huggingface/label-files"""
_lowerCAmelCase =num_labels
_lowerCAmelCase =json.load(open(cached_download(hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
    _lowerCAmelCase ={int(k ): v for k, v in idalabel.items()}
_lowerCAmelCase =idalabel
_lowerCAmelCase ={v: k for k, v in idalabel.items()}
    _lowerCAmelCase =_lowerCAmelCase =CvtConfig(num_labels=__UpperCamelCase , id2label=__UpperCamelCase , label2id=__UpperCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
_lowerCAmelCase =[1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
_lowerCAmelCase =[1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
_lowerCAmelCase =[2, 2, 20]
_lowerCAmelCase =[3, 12, 16]
_lowerCAmelCase =[192, 768, 1024]
_lowerCAmelCase =CvtForImageClassification(__UpperCamelCase )
_lowerCAmelCase =AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
_lowerCAmelCase =image_size
_lowerCAmelCase =torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )
_lowerCAmelCase =OrderedDict()
_lowerCAmelCase =[]
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
_lowerCAmelCase =list_of_state_dict + cls_token(__UpperCamelCase )
_lowerCAmelCase =list_of_state_dict + embeddings(__UpperCamelCase )
for cnt in range(config.depth[idx] ):
_lowerCAmelCase =list_of_state_dict + attention(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =list_of_state_dict + final()
for gg in list_of_state_dict:
print(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
_lowerCAmelCase =original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
image_processor.save_pretrained(__UpperCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
        help='Path to the original CvT checkpoint (.pth) file',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__A = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 341 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = None
lowerCamelCase = 1
lowerCamelCase = None
lowerCamelCase = False
lowerCamelCase = None
lowerCamelCase = None
def _lowerCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
| 341 | 1 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__A = logging.getLogger(__name__)
__A = 'pytorch_model.bin'
@dataclasses.dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
lowerCamelCase = dataclasses.field(
default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
lowerCamelCase = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
lowerCamelCase = dataclasses.field(
default=__magic_name__ , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
lowerCamelCase = dataclasses.field(
default=__magic_name__ , metadata={'''help''': '''The name of the task to train on.'''} , )
lowerCamelCase = dataclasses.field(
default=__magic_name__ , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
lowerCamelCase = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
lowerCamelCase = dataclasses.field(
default='''no''' , metadata={
    '''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "steps", "epoch"]'''
} , )
lowerCamelCase = dataclasses.field(
default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
lowerCamelCase = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
lowerCamelCase = dataclasses.field(
default=__magic_name__ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
lowerCamelCase = dataclasses.field(
default=__magic_name__ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
lowerCamelCase = dataclasses.field(
default=__magic_name__ , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
lowerCamelCase = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
lowerCamelCase = dataclasses.field(
    default=100 , metadata={'''help''': '''Maximum number of self-training iterations.'''} , )
lowerCamelCase = dataclasses.field(
default=__magic_name__ , metadata={'''help''': '''Random seed for initialization.'''} , )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
_lowerCAmelCase =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_lowerCAmelCase =dataset.filter(lambda __UpperCamelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_lowerCAmelCase =int(eval_result * len(__UpperCamelCase ) )
print(__UpperCamelCase )
_lowerCAmelCase =dataset.sort("""probability""" , reverse=__UpperCamelCase )
_lowerCAmelCase =dataset.select(range(__UpperCamelCase ) )
_lowerCAmelCase =dataset.remove_columns(["""label""", """probability"""] )
_lowerCAmelCase =dataset.rename_column("""prediction""" , """label""" )
_lowerCAmelCase =dataset.map(lambda __UpperCamelCase : {"label": idalabel[example["label"]]} )
_lowerCAmelCase =dataset.shuffle(seed=args.seed )
_lowerCAmelCase =os.path.join(__UpperCamelCase , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(__UpperCamelCase , index=__UpperCamelCase )
else:
dataset.to_json(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) -> Union[str, Any]:
_lowerCAmelCase =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase =STModelArguments(model_name_or_path=__UpperCamelCase )
_lowerCAmelCase =STDataArguments(train_file=__UpperCamelCase , infer_file=__UpperCamelCase )
_lowerCAmelCase =STTrainingArguments(output_dir=__UpperCamelCase )
_lowerCAmelCase =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__UpperCamelCase ).items():
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for key, value in kwargs.items():
if hasattr(__UpperCamelCase , __UpperCamelCase ):
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Sanity checks
_lowerCAmelCase ={}
_lowerCAmelCase =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_lowerCAmelCase =args.train_file
_lowerCAmelCase =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_lowerCAmelCase =args.eval_file
for key in data_files:
_lowerCAmelCase =data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
_lowerCAmelCase =extension
else:
        assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
_lowerCAmelCase =F'''{args.output_dir}/self-train_iter-{{}}'''.format
_lowerCAmelCase =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__UpperCamelCase )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
accelerator.wait_for_everyone()
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =False
# Show the progress bar
_lowerCAmelCase =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_lowerCAmelCase =data_dir_format(__UpperCamelCase )
assert os.path.exists(__UpperCamelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_lowerCAmelCase =os.path.join(__UpperCamelCase , """stage-1""" )
_lowerCAmelCase ={
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__UpperCamelCase , __UpperCamelCase ):
arguments_dict.update({key: value} )
_lowerCAmelCase =os.path.join(__UpperCamelCase , """best-checkpoint""" , __UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , __UpperCamelCase , __UpperCamelCase , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , __UpperCamelCase )
finetune(**__UpperCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCamelCase )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , __UpperCamelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_lowerCAmelCase =os.path.join(__UpperCamelCase , """best-checkpoint""" )
_lowerCAmelCase =os.path.join(__UpperCamelCase , """stage-2""" )
# Update arguments_dict
_lowerCAmelCase =model_path
_lowerCAmelCase =data_files["""train"""]
_lowerCAmelCase =current_output_dir
_lowerCAmelCase =os.path.join(__UpperCamelCase , """best-checkpoint""" , __UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , __UpperCamelCase , __UpperCamelCase , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , __UpperCamelCase )
finetune(**__UpperCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCamelCase )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , __UpperCamelCase )
_lowerCAmelCase =iteration
_lowerCAmelCase =data_dir_format(iteration + 1 )
_lowerCAmelCase =AutoConfig.from_pretrained(os.path.join(__UpperCamelCase , """best-checkpoint""" ) )
_lowerCAmelCase =config.idalabel
_lowerCAmelCase =os.path.join(__UpperCamelCase , """eval_results_best-checkpoint.json""" )
_lowerCAmelCase =os.path.join(__UpperCamelCase , """test_results_best-checkpoint.json""" )
assert os.path.exists(__UpperCamelCase )
with open(__UpperCamelCase , """r""" ) as f:
_lowerCAmelCase =float(json.load(__UpperCamelCase )[args.eval_metric] )
_lowerCAmelCase =os.path.join(__UpperCamelCase , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(__UpperCamelCase )
# Loading the dataset from local csv or json files.
_lowerCAmelCase =load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
_lowerCAmelCase =load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
shutil.copy(__UpperCamelCase , os.path.join(__UpperCamelCase , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(__UpperCamelCase ):
shutil.copy(__UpperCamelCase , os.path.join(__UpperCamelCase , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
accelerator.wait_for_everyone()
_lowerCAmelCase =os.path.join(__UpperCamelCase , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_lowerCAmelCase =eval_result
if best_iteration is None:
_lowerCAmelCase =new_iteration
_lowerCAmelCase =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_lowerCAmelCase =new_iteration
_lowerCAmelCase =new_eval_result
_lowerCAmelCase =0
else:
if new_eval_result == best_eval_result:
_lowerCAmelCase =new_iteration
_lowerCAmelCase =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_lowerCAmelCase =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , __UpperCamelCase )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCamelCase , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(__UpperCamelCase , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , __UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCamelCase , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(__UpperCamelCase , """eval_results_best-iteration.json""" ) , )
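# Hypothetical invocation sketch: the driver above (here assumed to be exposed as
# `selftrain`) takes a model name, a labeled train file, an unlabeled infer file and an
# output directory, plus any extra finetuning kwargs. All paths below are placeholders.
#
#     selftrain(
#         "bert-base-uncased",        # model_name_or_path
#         "data/train.csv",           # train_file (labeled)
#         "data/infer.csv",           # infer_file (to be pseudo-labeled)
#         "output/self-training",     # output_dir
#         eval_file="data/eval.csv",
#         evaluation_strategy="epoch",
#         max_selftrain_iterations=5,
#     )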
| 341 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def _lowerCamelCase() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
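# Equivalent one-liner sketch (an alternative formulation, not from the original): for
# 0/1 inputs the OR gate is just Python's bitwise OR.
def or_gate_bitwise(input_1, input_2):
    return input_1 | input_2
assert all(or_gate_bitwise(a, b) == int(a or b) for a in (0, 1) for b in (0, 1))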
| 341 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
# Load configuration defined in the metadata file
with open(__UpperCamelCase ) as metadata_file:
_lowerCAmelCase =json.load(__UpperCamelCase )
_lowerCAmelCase =LukeConfig(use_entity_aware_attention=__UpperCamelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_lowerCAmelCase =torch.load(__UpperCamelCase , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
_lowerCAmelCase =load_original_entity_vocab(__UpperCamelCase )
# add an entry for [MASK2]
_lowerCAmelCase =max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_lowerCAmelCase =XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCAmelCase =AddedToken("""<ent>""" , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase )
_lowerCAmelCase =AddedToken("""<ent2>""" , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase , """tokenizer_config.json""" ) , """r""" ) as f:
_lowerCAmelCase =json.load(__UpperCamelCase )
_lowerCAmelCase ="""MLukeTokenizer"""
with open(os.path.join(__UpperCamelCase , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
with open(os.path.join(__UpperCamelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =MLukeTokenizer.from_pretrained(__UpperCamelCase )
# Initialize the embeddings of the special tokens
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(["""@"""] )[0]
_lowerCAmelCase =tokenizer.convert_tokens_to_ids(["""#"""] )[0]
_lowerCAmelCase =state_dict["""embeddings.word_embeddings.weight"""]
_lowerCAmelCase =word_emb[ent_init_index].unsqueeze(0 )
_lowerCAmelCase =word_emb[enta_init_index].unsqueeze(0 )
_lowerCAmelCase =torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_lowerCAmelCase =state_dict[bias_name]
_lowerCAmelCase =decoder_bias[ent_init_index].unsqueeze(0 )
_lowerCAmelCase =decoder_bias[enta_init_index].unsqueeze(0 )
_lowerCAmelCase =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCAmelCase =F'''encoder.layer.{layer_index}.attention.self.'''
_lowerCAmelCase =state_dict[prefix + matrix_name]
_lowerCAmelCase =state_dict[prefix + matrix_name]
_lowerCAmelCase =state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCAmelCase =state_dict["""entity_embeddings.entity_embeddings.weight"""]
_lowerCAmelCase =entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_lowerCAmelCase =torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_lowerCAmelCase =state_dict["""entity_predictions.bias"""]
_lowerCAmelCase =entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_lowerCAmelCase =torch.cat([entity_prediction_bias, entity_mask_bias] )
_lowerCAmelCase =LukeForMaskedLM(config=__UpperCamelCase ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
_lowerCAmelCase =OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
_lowerCAmelCase =state_dict[key]
else:
_lowerCAmelCase =state_dict[key]
_lowerCAmelCase , _lowerCAmelCase =model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
if set(__UpperCamelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(__UpperCamelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_lowerCAmelCase =MLukeTokenizer.from_pretrained(__UpperCamelCase , task="""entity_classification""" )
_lowerCAmelCase ="""ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
_lowerCAmelCase =(0, 9)
_lowerCAmelCase =tokenizer(__UpperCamelCase , entity_spans=[span] , return_tensors="""pt""" )
_lowerCAmelCase =model(**__UpperCamelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCAmelCase =torch.Size((1, 33, 768) )
_lowerCAmelCase =torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCamelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCAmelCase =torch.Size((1, 1, 768) )
_lowerCAmelCase =torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __UpperCamelCase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_lowerCAmelCase =MLukeTokenizer.from_pretrained(__UpperCamelCase )
_lowerCAmelCase ="""Tokyo is the capital of <mask>."""
_lowerCAmelCase =(24, 30)
_lowerCAmelCase =tokenizer(__UpperCamelCase , entity_spans=[span] , return_tensors="""pt""" )
_lowerCAmelCase =model(**__UpperCamelCase )
_lowerCAmelCase =encoding["""input_ids"""][0].tolist()
_lowerCAmelCase =input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
_lowerCAmelCase =outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__UpperCamelCase )
_lowerCAmelCase =outputs.entity_logits[0][0].argmax().item()
_lowerCAmelCase =[
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__UpperCamelCase ) )
model.save_pretrained(__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]:
_lowerCAmelCase =["""[MASK]""", """[PAD]""", """[UNK]"""]
_lowerCAmelCase =[json.loads(__UpperCamelCase ) for line in open(__UpperCamelCase )]
_lowerCAmelCase ={}
for entry in data:
_lowerCAmelCase =entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_lowerCAmelCase =entity_id
break
_lowerCAmelCase =F'''{language}:{entity_name}'''
_lowerCAmelCase =entity_id
return new_mapping
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__A = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 341 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple:
_lowerCAmelCase =compute_bleu(
reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
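# A minimal sketch (assumed, separate from the metric class above) of the clipped
# unigram precision that BLEU builds on: each predicted token is credited at most as
# many times as it occurs in the reference.
from collections import Counter
def _unigram_precision(prediction, reference):
    pred_counts, ref_counts = Counter(prediction), Counter(reference)
    clipped = sum(min(c, ref_counts[tok]) for tok, c in pred_counts.items())
    return clipped / max(len(prediction), 1)
assert _unigram_precision(["hello", "there"], ["hello", "there", "!"]) == 1.0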
| 341 | 1 |
"""simple docstring"""
import math
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase = 0 , __UpperCamelCase = 0 ) -> list:
_lowerCAmelCase =end or len(__UpperCamelCase )
for i in range(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =i
_lowerCAmelCase =array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_lowerCAmelCase =array[temp_index - 1]
temp_index -= 1
_lowerCAmelCase =temp_index_value
return array
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> None: # Max Heap
_lowerCAmelCase =index
_lowerCAmelCase =2 * index + 1 # Left Node
_lowerCAmelCase =2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_lowerCAmelCase =left_index
if right_index < heap_size and array[largest] < array[right_index]:
_lowerCAmelCase =right_index
if largest != index:
_lowerCAmelCase , _lowerCAmelCase =array[largest], array[index]
heapify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase ) -> list:
_lowerCAmelCase =len(__UpperCamelCase )
for i in range(n // 2 , -1 , -1 ):
heapify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for i in range(n - 1 , 0 , -1 ):
_lowerCAmelCase , _lowerCAmelCase =array[0], array[i]
heapify(__UpperCamelCase , 0 , __UpperCamelCase )
return array
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
_lowerCAmelCase =low
_lowerCAmelCase =high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_lowerCAmelCase , _lowerCAmelCase =array[j], array[i]
i += 1
def _lowerCamelCase(__UpperCamelCase ) -> list:
if len(__UpperCamelCase ) == 0:
return array
_lowerCAmelCase =2 * math.ceil(math.loga(len(__UpperCamelCase ) ) )
_lowerCAmelCase =16
return intro_sort(__UpperCamelCase , 0 , len(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(__UpperCamelCase )
max_depth -= 1
_lowerCAmelCase =median_of_a(__UpperCamelCase , __UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 )
_lowerCAmelCase =partition(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
intro_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =p
return insertion_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = input('Enter numbers separated by a comma : ').strip()
__A = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
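    # Quick illustrative sanity check (not part of the original): the hybrid introsort
    # above should agree with Python's built-in sorting on random input.
    import random
    _sample = [float(random.randint(-100, 100)) for _ in range(500)]
    assert sort(list(_sample)) == sorted(_sample)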
| 341 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__A = parser.parse_args()
__A = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
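# Illustrative invocation (script name and paths are placeholders, not from the original):
#
#     python convert_original_controlnet_to_diffusers.py \
#         --checkpoint_path ./control_sd15_canny.pth \
#         --original_config_file ./cldm_v15.yaml \
#         --dump_path ./controlnet-canny \
#         --to_safetensors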
| 341 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _lowerCamelCase() -> int:
_lowerCAmelCase =ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=__UpperCamelCase )
_lowerCAmelCase =parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=__UpperCamelCase )
env_command_parser(subparsers=__UpperCamelCase )
launch_command_parser(subparsers=__UpperCamelCase )
tpu_command_parser(subparsers=__UpperCamelCase )
test_command_parser(subparsers=__UpperCamelCase )
# Let's go
_lowerCAmelCase =parser.parse_args()
if not hasattr(__UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
args.func(__UpperCamelCase )
if __name__ == "__main__":
main()
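# Typical invocations of the resulting CLI (illustrative):
#
#     accelerate config    # interactive configuration wizard
#     accelerate env       # print the current environment / config
#     accelerate test      # sanity-check the saved configuration
#     accelerate launch train.py --lr 1e-4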
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__A = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict:
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="attention" ) -> Optional[int]:
_lowerCAmelCase =_lowerCAmelCase =np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_lowerCAmelCase =k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_lowerCAmelCase =np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_lowerCAmelCase =o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_lowerCAmelCase =np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_lowerCAmelCase =q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_lowerCAmelCase =np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_lowerCAmelCase =v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Optional[int]:
if split_mlp_wi:
_lowerCAmelCase =params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
_lowerCAmelCase =params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
_lowerCAmelCase =(wi_a, wi_a)
else:
_lowerCAmelCase =params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
_lowerCAmelCase =params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def _lowerCamelCase(__UpperCamelCase , *, __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> Any:
_lowerCAmelCase =traverse_util.flatten_dict(variables["""target"""] )
_lowerCAmelCase ={"""/""".join(__UpperCamelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_lowerCAmelCase ="""encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , __UpperCamelCase )
_lowerCAmelCase =collections.OrderedDict()
# Shared embeddings.
_lowerCAmelCase =old["""token_embedder/embedding"""]
# Encoder.
for i in range(__UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase =tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """pre_attention_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """attention""" )
_lowerCAmelCase =layer_norm
_lowerCAmelCase =k.T
_lowerCAmelCase =o.T
_lowerCAmelCase =q.T
_lowerCAmelCase =v.T
# Block i, layer 1 (MLP).
_lowerCAmelCase =tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , """pre_mlp_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase =tax_mlp_lookup(__UpperCamelCase , __UpperCamelCase , """encoder""" , __UpperCamelCase )
_lowerCAmelCase =layer_norm
if split_mlp_wi:
_lowerCAmelCase =wi[0].T
_lowerCAmelCase =wi[1].T
else:
_lowerCAmelCase =wi.T
_lowerCAmelCase =wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_lowerCAmelCase =tax_relpos_bias_lookup(
__UpperCamelCase , __UpperCamelCase , """encoder""" ).T
_lowerCAmelCase =old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
_lowerCAmelCase =tax_relpos_bias_lookup(
__UpperCamelCase , 0 , """encoder""" ).T
_lowerCAmelCase =tax_relpos_bias_lookup(
__UpperCamelCase , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(__UpperCamelCase ):
# Block i, layer 0 (Self Attention).
_lowerCAmelCase =tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_self_attention_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """self_attention""" )
_lowerCAmelCase =layer_norm
_lowerCAmelCase =k.T
_lowerCAmelCase =o.T
_lowerCAmelCase =q.T
_lowerCAmelCase =v.T
# Block i, layer 1 (Cross Attention).
_lowerCAmelCase =tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_cross_attention_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =tax_attention_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """encoder_decoder_attention""" )
_lowerCAmelCase =layer_norm
_lowerCAmelCase =k.T
_lowerCAmelCase =o.T
_lowerCAmelCase =q.T
_lowerCAmelCase =v.T
# Block i, layer 2 (MLP).
_lowerCAmelCase =tax_layer_norm_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , """pre_mlp_layer_norm""" )
_lowerCAmelCase , _lowerCAmelCase =tax_mlp_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" , __UpperCamelCase )
_lowerCAmelCase =layer_norm
if split_mlp_wi:
_lowerCAmelCase =wi[0].T
_lowerCAmelCase =wi[1].T
else:
_lowerCAmelCase =wi.T
_lowerCAmelCase =wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_lowerCAmelCase =tax_relpos_bias_lookup(__UpperCamelCase , __UpperCamelCase , """decoder""" ).T
_lowerCAmelCase =old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_lowerCAmelCase =old["""decoder/logits_dense/kernel"""].T
return new
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict:
_lowerCAmelCase =collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase =state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_lowerCAmelCase =state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
_lowerCAmelCase =state_dict["""shared.weight"""]
return state_dict
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
_lowerCAmelCase =checkpoints.load_tax_checkpoint(__UpperCamelCase )
_lowerCAmelCase =convert_tax_to_pytorch(
__UpperCamelCase , num_layers=config.num_layers , is_encoder_only=__UpperCamelCase , scalable_attention=__UpperCamelCase )
_lowerCAmelCase =make_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = False , ) -> List[str]:
_lowerCAmelCase =MTaConfig.from_json_file(__UpperCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_lowerCAmelCase =UMTaEncoderModel(__UpperCamelCase )
else:
_lowerCAmelCase =UMTaForConditionalGeneration(__UpperCamelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__UpperCamelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(__UpperCamelCase )
print("""Done""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
__A = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
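# Shape bookkeeping sketch (illustrative sizes, not from the original) for the attention
# conversion above: T5X stores attention kernels as (d_model, num_heads, d_head), and
# the converter flattens the head dimensions into the single matrix PyTorch expects.
import numpy as np
_k_tmp = np.zeros((512, 8, 64))  # (d_model, num_heads, d_head)
_k = _k_tmp.reshape(_k_tmp.shape[0], _k_tmp.shape[1] * _k_tmp.shape[2])
assert _k.shape == (512, 512)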
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
import heapq
import sys
import numpy as np
__A = tuple[int, int]
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self ) -> str:
_lowerCAmelCase =[]
_lowerCAmelCase =set()
def _lowerCAmelCase ( self ) -> Tuple:
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _lowerCAmelCase ( self ) -> Optional[Any]:
return len(self.elements ) == 0
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__UpperCAmelCase )
else:
# update
# print("update", item)
_lowerCAmelCase =[]
((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
if item in self.set:
self.set.remove(__UpperCAmelCase )
_lowerCAmelCase =[]
((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _lowerCAmelCase ( self ) -> List[Any]:
return self.elements[0][1]
def _lowerCAmelCase ( self ) -> Union[str, Any]:
((_lowerCAmelCase) , (_lowerCAmelCase)) =heapq.heappop(self.elements )
self.set.remove(__UpperCAmelCase )
return (priority, item)
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any:
# euclidean distance
_lowerCAmelCase =np.array(__UpperCamelCase )
_lowerCAmelCase =np.array(__UpperCamelCase )
return np.linalg.norm(a - b )
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
# integer division by time variable
return consistent_heuristic(__UpperCamelCase , __UpperCamelCase ) // t
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
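# Standalone restatement (for illustration only) of the Manhattan heuristic above,
# with a worked value on the 20x20 grid used later: from (0, 0) to the goal (19, 19).
def _manhattan(p, goal):
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])
assert _manhattan((0, 0), (19, 19)) == 38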
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
_lowerCAmelCase =g_function[start] + Wa * heuristics[i](__UpperCamelCase , __UpperCamelCase )
return ans
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str:
_lowerCAmelCase =np.chararray((n, n) )
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
_lowerCAmelCase ="""*"""
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
_lowerCAmelCase ="""#"""
_lowerCAmelCase ="""-"""
_lowerCAmelCase =back_pointer[goal]
while x != start:
((_lowerCAmelCase) , (_lowerCAmelCase)) =x
# print(x)
_lowerCAmelCase ="""-"""
_lowerCAmelCase =back_pointer[x]
_lowerCAmelCase ="""-"""
for i in range(__UpperCamelCase ):
for j in range(__UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
_lowerCAmelCase =back_pointer[goal]
while x != start:
print(__UpperCamelCase , end=""" """ )
_lowerCAmelCase =back_pointer[x]
print(__UpperCamelCase )
sys.exit()
def _lowerCamelCase(__UpperCamelCase ) -> Dict:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> str:
for itera in range(__UpperCamelCase ):
open_list[itera].remove_element(__UpperCamelCase )
# print("s", s)
# print("j", j)
((_lowerCAmelCase) , (_lowerCAmelCase)) =s
_lowerCAmelCase =(x - 1, y)
_lowerCAmelCase =(x + 1, y)
_lowerCAmelCase =(x, y + 1)
_lowerCAmelCase =(x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(__UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(__UpperCamelCase )
_lowerCAmelCase =-1
_lowerCAmelCase =float("""inf""" )
if valid(__UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
_lowerCAmelCase =g_function[s] + 1
_lowerCAmelCase =s
if neighbours not in close_list_anchor:
open_list[0].put(__UpperCamelCase , key(__UpperCamelCase , 0 , __UpperCamelCase , __UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , __UpperCamelCase ):
if key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) <= Wa * key(
__UpperCamelCase , 0 , __UpperCamelCase , __UpperCamelCase ):
open_list[j].put(
__UpperCamelCase , key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) )
def _lowerCamelCase() -> str:
_lowerCAmelCase =[]
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__A = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__A = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__A = make_common_ground()
__A = blocks_blk
# hyper parameters
__A = 1
__A = 1
__A = 20
__A = 3 # one consistent and two other inconsistent
# start and end destination
__A = (0, 0)
__A = (n - 1, n - 1)
__A = 1
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
_lowerCAmelCase ={start: 0, goal: float("""inf""" )}
_lowerCAmelCase ={start: -1, goal: -1}
_lowerCAmelCase =[]
_lowerCAmelCase =set()
for i in range(__UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(__UpperCamelCase , key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , __UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
_lowerCAmelCase , _lowerCAmelCase =open_list[i].top_show()
visited.add(__UpperCamelCase )
expand_state(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
close_list_inad.append(__UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
_lowerCAmelCase =open_list[0].top_show()
visited.add(__UpperCamelCase )
expand_state(
__UpperCamelCase , 0 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
close_list_anchor.append(__UpperCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(__UpperCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 341 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__A = datasets.logging.get_logger(__name__)
__A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_KWARGS_DESCRIPTION = '\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting \'keep_singletons=False\', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    \'mentions\': mentions\n    \'muc\': MUC metric [Vilain et al, 1995]\n    \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n    \'ceafe\': CEAFe [Luo et al., 2005]\n    \'lea\': LEA [Moosavi and Strube, 2016]\n    \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric(\'coval\')\n    >>> words = [\'bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -\',\n    ... \'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)\',\n    ... \'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)         *    (116)\',\n    ... \'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *         (ARG2*         *       -\',\n    ... \'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)       -\',\n    ... \'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -\']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters , singletons_num = reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
    sys_clusters , singletons_num = reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
    if remove_nested:
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters , key_clusters )
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters , sys_clusters )
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively" )
    return doc_coref_infos
def evaluate(key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
    doc_coref_infos = get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall , precision , f1 = evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1} )
        logger.info(
            name.ljust(10 ) , f"Recall: {recall * 100:.2f}" , f" Precision: {precision * 100:.2f}" , f" F1: {f1 * 100:.2f}" , )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}" )
        output_scores.update({"conll_score": conll} )
    return output_scores
def check_gold_parse_annotation(key_lines ) -> bool:
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#" ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if parse_col != "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
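# Illustration (not part of the original CoVal code): `conll_score` above is
# simply the arithmetic mean of the MUC, B-cubed and CEAFe F1 values, scaled
# to 0-100. The three F1 values below are made up for the example.
if __name__ == "__main__":
    muc_f1, bcub_f1, ceafe_f1 = 0.81, 0.74, 0.69
    example_conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100
    print(f"CoNLL score: {example_conll_score:.2f}")  # CoNLL score: 74.67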
| 341 | 1 |
"""simple docstring"""
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data ) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data , bytes ):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg )
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode(encoded_data ) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
if __name__ == "__main__":
import doctest
doctest.testmod()
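    # Round-trip sanity check for the two functions above; comparing against
    # the standard-library `base64` module is an illustrative addition, not
    # part of the original snippet.
    import base64
    payload = b"Hello, World!"
    encoded = base64_encode(payload)
    assert encoded == base64.b64encode(payload)  # agrees with the stdlib encoder
    assert base64_decode(encoded) == payload  # decoding restores the input
    print(encoded)  # b'SGVsbG8sIFdvcmxkIQ=='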
| 341 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
'''simple docstring'''
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config( self ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
    def prepare_config_and_inputs( self ):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config( self ):
return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
    def test_resize_token_embeddings( self ):
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase ):
'''simple docstring'''
@slow
    def test_lm_generate_xglm( self , verify_outputs=True ):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        input_ids = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
    def test_xglm_sample( self ):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        tf.random.set_seed(0 )
        tokenized = tokenizer("Today is a nice day and" , return_tensors="tf" )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0" ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
    def test_batch_generation( self ):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences , return_tensors="tf" , padding=True )
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
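# Aside (not from the original tests): the batch-generation test above relies
# on left padding because decoder-only models generate from the end of the
# sequence. A toy illustration with made-up token ids, where 0 stands for <pad>:
if __name__ == "__main__":
    prompt = [5, 6, 7]
    right_padded = prompt + [0, 0]  # the first generated token would follow a <pad>
    left_padded = [0, 0] + prompt  # generation starts right after the prompt
    assert left_padded[-1] == prompt[-1] != right_padded[-1]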
| 341 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ) -> int:
return len(self.sp_model )
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , '"' ).replace("''" , '"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def _decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
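# Usage sketch (illustrative; fetching the `xlnet-base-cased` files needs
# network access): unlike BERT, the builders above place `<sep>` and `<cls>`
# at the END of the sequence, which the last two tokens below make visible.
if __name__ == "__main__":
    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    ids_a = tokenizer.encode("Hello world", add_special_tokens=False)
    ids_b = tokenizer.encode("How are you?", add_special_tokens=False)
    pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
    print(tokenizer.convert_ids_to_tokens(pair)[-2:])  # ['<sep>', '<cls>']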
| 341 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'spiece.model'}
__A = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__A = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__A = 0
__A = 1
__A = 2
__A = 3
__A = 4
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = '''left'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str:
_lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase )
_lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_lowerCAmelCase =[]
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase ="""""".join(__UpperCAmelCase )
_lowerCAmelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
| 341 | 1 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__A = datasets.logging.get_logger(__name__)
__A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
__A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
__A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict:
_lowerCAmelCase ={doc: key_lines}
_lowerCAmelCase ={doc: sys_lines}
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
_lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase )
if remove_nested:
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
_lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase ={}
_lowerCAmelCase =0
_lowerCAmelCase =0
for name, metric in metrics:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
_lowerCAmelCase =(conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def _lowerCamelCase(__UpperCamelCase ) -> Tuple:
_lowerCAmelCase =False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
_lowerCAmelCase =line.split()[5]
if not parse_col == "-":
_lowerCAmelCase =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]:
_lowerCAmelCase =[
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
_lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_lowerCAmelCase =evaluate(
key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , )
return score
| 341 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n ) -> bool:
    n = str(n )
    return n == n[::-1]
def solution(limit = 1000000 ) -> int:
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
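    # Worked example (Project Euler 36): 585 is a palindrome in base 10 and,
    # as 1001001001, in base 2. Below 1000 the double-base palindromes are
    # 1, 3, 5, 7, 9, 33, 99, 313, 585 and 717, which sum to 1772.
    assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])
    assert solution(1000) == 1772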
| 341 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'''Could not make batched video from {videos}''' )
class VideoMAEImageProcessor(BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f'''Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}''' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size must have 'height' and 'width' as keys. Got {size.keys()}''' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
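# Usage sketch (illustrative, not part of the original file): with the default
# sizes above, two synthetic 8-frame videos come out as a batch of shape
# (2, 8, 3, 224, 224).
if __name__ == "__main__":
    example_videos = [
        [np.random.randint(0, 256, size=(360, 640, 3), dtype=np.uint8) for _ in range(8)]
        for _ in range(2)
    ]
    processor = VideoMAEImageProcessor()
    batch = processor(example_videos, return_tensors="np")
    print(batch["pixel_values"].shape)  # (2, 8, 3, 224, 224)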
| 341 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
def _lowerCAmelCase ( self ) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase )
_lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
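# Added example: a hedged sketch, assuming the de-obfuscated class name
# `LlamaConfig` for the config above. A `rope_scaling` value that passes
# `_rope_scaling_validation` is a two-field dict with a recognized type and a
# float factor greater than 1.0; anything else raises one of the errors above:
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # ok
#     config = LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})    # ValueError
#     config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 0.5})  # ValueError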
| 341 | 1 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def _lowerCamelCase(__UpperCamelCase ) -> str:
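    # Chudnovsky series: 1/pi = 12 * sum_{k>=0} (-1)**k * (6k)! * (13591409 + 545140134*k)
    #                             / ((3k)! * (k!)**3 * 640320**(3*k + 3/2)).
    # Each term contributes roughly 14 decimal digits, hence the
    # ceil(precision / 14) iteration count below; 426880 * sqrt(10005) equals
    # 640320**(3/2) / 12, and -262537412640768000 is -640320**3.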
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
_lowerCAmelCase =precision
_lowerCAmelCase =ceil(precision / 14 )
_lowerCAmelCase =426880 * Decimal(10005 ).sqrt()
_lowerCAmelCase =1
_lowerCAmelCase =13591409
_lowerCAmelCase =Decimal(__UpperCamelCase )
for k in range(1 , __UpperCamelCase ):
_lowerCAmelCase =factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__A = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
| 341 |
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
# warning at import time
warnings.warn(
'''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '''
'''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
| 341 | 1 |
"""simple docstring"""
from manim import *
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""CPU""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__UpperCAmelCase )
_lowerCAmelCase =[mem.copy() for i in range(4 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""GPU""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__UpperCAmelCase )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""Model""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__UpperCAmelCase )
_lowerCAmelCase =[]
for i, rect in enumerate(__UpperCAmelCase ):
rect.set_stroke(__UpperCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_lowerCAmelCase =Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__UpperCAmelCase , buff=0.0 )
self.add(__UpperCAmelCase )
cpu_targs.append(__UpperCAmelCase )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 )
_lowerCAmelCase =Text("""Loaded Checkpoint""" , font_size=24 )
_lowerCAmelCase =Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , aligned_edge=__UpperCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_lowerCAmelCase =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase =MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__UpperCAmelCase , __UpperCAmelCase )
_lowerCAmelCase =MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_lowerCAmelCase =MarkupText(
f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__UpperCAmelCase ) , Write(__UpperCAmelCase ) )
self.play(Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i, rect in enumerate(__UpperCAmelCase ):
_lowerCAmelCase =fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 )
target.move_to(__UpperCAmelCase )
first_animations.append(GrowFromCenter(__UpperCAmelCase , run_time=1 ) )
_lowerCAmelCase =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) )
self.play(*__UpperCAmelCase )
self.play(*__UpperCAmelCase )
self.wait()
| 341 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =decoder_seq_length
# For common tests
_lowerCAmelCase =self.decoder_seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =d_model
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_layers
_lowerCAmelCase =decoder_ffn_dim
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =decoder_attention_heads
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =pad_token_id
_lowerCAmelCase =decoder_start_token_id
_lowerCAmelCase =use_cache
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =None
_lowerCAmelCase =decoder_seq_length
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCAmelCase =TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]:
_lowerCAmelCase =True
_lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
_lowerCAmelCase =input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase )
_lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_lowerCAmelCase =outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""]
_lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""]
# select random slice
_lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
def _lowerCAmelCase ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase = True
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> List[str]:
pass
def _lowerCAmelCase ( self ) -> List[Any]:
pass
def _lowerCAmelCase ( self ) -> Any:
pass
def _lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Tuple:
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ) -> str:
pass
| 341 | 1 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__A = logging.get_logger(__name__)
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Union[str, Any]:
if not conversation_id:
            _lowerCAmelCase =uuid.uuid4()
if past_user_inputs is None:
_lowerCAmelCase =[]
if generated_responses is None:
_lowerCAmelCase =[]
_lowerCAmelCase =conversation_id
_lowerCAmelCase =past_user_inputs
_lowerCAmelCase =generated_responses
_lowerCAmelCase =text
def __eq__( self , __UpperCAmelCase ) -> Optional[int]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ) -> Union[str, Any]:
if self.new_user_input:
if overwrite:
logger.warning(
                    f'''User input added while unprocessed input already existed: "{self.new_user_input}" was overwritten '''
f'''with: "{text}".''' )
_lowerCAmelCase =text
else:
logger.warning(
                    f'''User input added while unprocessed input already existed: "{self.new_user_input}" new input '''
f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
_lowerCAmelCase =text
def _lowerCAmelCase ( self ) -> Optional[Any]:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_lowerCAmelCase =None
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
self.generated_responses.append(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> Optional[Any]:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Dict:
_lowerCAmelCase =f'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
_lowerCAmelCase ="""user""" if is_user else """bot"""
output += f'''{name} >> {text} \n'''
return output
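# Usage sketch (hedged): the method defs above are name-obfuscated, so this
# shows the de-obfuscated API (the names `mark_processed` and `append_response`
# are the ones the pipeline below actually calls on this object). One turn:
#
#     conv = Conversation("Hi there!")                 # queue a user input
#     conv.mark_processed()                            # move it to past_user_inputs
#     conv.append_response("Hello! How can I help?")   # record the bot reply
#     print(conv)                                      # "Conversation id: ... user >> ..."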
@add_end_docstrings(
__magic_name__ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
_lowerCAmelCase =self.tokenizer.eos_token
def _lowerCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
_lowerCAmelCase ={}
_lowerCAmelCase ={}
_lowerCAmelCase ={}
if min_length_for_response is not None:
_lowerCAmelCase =min_length_for_response
if minimum_tokens is not None:
_lowerCAmelCase =minimum_tokens
if "max_length" in generate_kwargs:
_lowerCAmelCase =generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_lowerCAmelCase =clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ) -> Optional[int]:
_lowerCAmelCase =super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ) -> Dict[str, Any]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
                f'''Conversation with UUID {conversation.uuid} does not contain new user input to process. '''
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
_lowerCAmelCase =self.tokenizer._build_conversation_input_ids(__UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_lowerCAmelCase =self._legacy_parse_and_tokenize(__UpperCAmelCase )
if self.framework == "pt":
_lowerCAmelCase =torch.LongTensor([input_ids] )
elif self.framework == "tf":
_lowerCAmelCase =tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ) -> Dict:
_lowerCAmelCase =generate_kwargs.get("""max_length""" , self.model.config.max_length )
_lowerCAmelCase =model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
_lowerCAmelCase =max_length - minimum_tokens
_lowerCAmelCase =model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
_lowerCAmelCase =model_inputs["""attention_mask"""][:, -trim:]
_lowerCAmelCase =model_inputs.pop("""conversation""" )
_lowerCAmelCase =max_length
_lowerCAmelCase =self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase )
if self.model.config.is_encoder_decoder:
_lowerCAmelCase =1
else:
_lowerCAmelCase =n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
_lowerCAmelCase =model_outputs["""output_ids"""]
_lowerCAmelCase =self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , )
_lowerCAmelCase =model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__UpperCAmelCase )
return conversation
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict:
_lowerCAmelCase =self.tokenizer.eos_token_id
_lowerCAmelCase =[]
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > self.tokenizer.model_max_length:
_lowerCAmelCase =input_ids[-self.tokenizer.model_max_length :]
return input_ids
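# End-to-end sketch (hedged): in the de-obfuscated library this class is the
# conversational pipeline, reached via `pipeline("conversational")`; the model
# name below is purely illustrative.
#
#     from transformers import Conversation, pipeline
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conv = Conversation("What's the best way to learn Python?")
#     conv = chatbot(conv, minimum_tokens=10)
#     print(conv.generated_responses[-1])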
| 341 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = JukeboxTokenizer
lowerCamelCase = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def _lowerCAmelCase ( self ) -> str:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self ) -> Any:
import torch
_lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
_lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""]
# fmt: off
_lowerCAmelCase =[
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> list[list[int]]:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =sum(__UpperCamelCase )
create_state_space_tree(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return result
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> None:
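    # Prune this branch when the running path sum already exceeds max_sum, or
    # when even adding every remaining number could not reach max_sum.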
if sum(__UpperCamelCase ) > max_sum or (remaining_nums_sum + sum(__UpperCamelCase )) < max_sum:
return
if sum(__UpperCamelCase ) == max_sum:
result.append(__UpperCamelCase )
return
for index in range(__UpperCamelCase , len(__UpperCamelCase ) ):
create_state_space_tree(
__UpperCamelCase , __UpperCamelCase , index + 1 , [*path, nums[index]] , __UpperCamelCase , remaining_nums_sum - nums[index] , )
__A = [3, 34, 4, 12, 5, 2]
__A = 9
__A = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 341 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '▁'
__A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =vocab_file
_lowerCAmelCase =monolingual_vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCAmelCase ={}
_lowerCAmelCase =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =cnt
cnt += 1
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
_lowerCAmelCase =line.strip().split()[0]
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids:
_lowerCAmelCase =len(self.fairseq_tokens_to_ids )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
_lowerCAmelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return len(self.fairseq_ids_to_tokens )
def _lowerCAmelCase ( self ) -> List[Any]:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
return self.fairseq_ids_to_tokens[index]
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(__UpperCAmelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
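# Usage sketch (hedged): this mirrors the real `BartphoTokenizer`, whose vocab
# files are mapped in the constants above; ids come from the reduced
# monolingual vocabulary built in __init__.
#
#     from transformers import BartphoTokenizer
#     tok = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tok("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#     print(tok.convert_ids_to_tokens(ids))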
| 341 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__A = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =1
_lowerCAmelCase =3
_lowerCAmelCase =(32, 32)
_lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase )
return image
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
        _lowerCAmelCase =UNet2DConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_lowerCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(__UpperCAmelCase )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase =Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase =Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
_lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =self.dummy_cond_unet_upscale
_lowerCAmelCase =DDPMScheduler()
_lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" )
_lowerCAmelCase =self.dummy_vae
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase =Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase =unet.half()
_lowerCAmelCase =text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase =StableDiffusionUpscalePipeline(
unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , )
_lowerCAmelCase =sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_lowerCAmelCase ="""A painting of a squirrel eating a burger"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =sd_pipe(
[prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images
_lowerCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.float16 , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
_lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler"""
_lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.float16 , )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase ="""a cat sitting on a park bench"""
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pipe(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , )
_lowerCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 341 | 1 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> bool:
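    # An isogram contains no repeating letters: compare the string's length
    # against the number of distinct (case-folded) characters.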
if not all(x.isalpha() for x in string ):
raise ValueError("""String must only contain alphabetic characters.""" )
_lowerCAmelCase =sorted(string.lower() )
return len(__UpperCamelCase ) == len(set(__UpperCamelCase ) )
if __name__ == "__main__":
__A = input('Enter a string ').strip()
__A = is_isogram(input_str)
print(F"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 341 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = '''cvt'''
def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(**__UpperCAmelCase )
_lowerCAmelCase =num_channels
_lowerCAmelCase =patch_sizes
_lowerCAmelCase =patch_stride
_lowerCAmelCase =patch_padding
_lowerCAmelCase =embed_dim
_lowerCAmelCase =num_heads
_lowerCAmelCase =depth
_lowerCAmelCase =mlp_ratio
_lowerCAmelCase =attention_drop_rate
_lowerCAmelCase =drop_rate
_lowerCAmelCase =drop_path_rate
_lowerCAmelCase =qkv_bias
_lowerCAmelCase =cls_token
_lowerCAmelCase =qkv_projection_method
_lowerCAmelCase =kernel_qkv
_lowerCAmelCase =padding_kv
_lowerCAmelCase =stride_kv
_lowerCAmelCase =padding_q
_lowerCAmelCase =stride_q
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
| 341 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
super().__init__()
# make sure scheduler can always be converted to DDIM
_lowerCAmelCase =DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 50 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , ) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __UpperCAmelCase ):
_lowerCAmelCase =(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_lowerCAmelCase =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__UpperCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_lowerCAmelCase =randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_lowerCAmelCase =self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_lowerCAmelCase =self.scheduler.step(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , eta=__UpperCAmelCase , use_clipped_model_output=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
_lowerCAmelCase =(image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase =self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
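# Usage sketch (hedged): this mirrors the real `DDIMPipeline`; the checkpoint
# name is illustrative. `eta=0.0` keeps sampling deterministic, matching the
# default of __call__ above.
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#     image.save("ddim_sample.png")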
| 341 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = ['''image_processor''', '''tokenizer''']
lowerCamelCase = '''CLIPImageProcessor'''
lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
_lowerCAmelCase =kwargs.pop("""feature_extractor""" )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
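# Usage sketch (hedged): this processor pairs a CLIPImageProcessor with an
# XLM-RoBERTa tokenizer, as in the real `AltCLIPProcessor`; the checkpoint
# below is illustrative.
#
#     from PIL import Image
#     from transformers import AltCLIPProcessor
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                        return_tensors="pt", padding=True)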
| 341 | 1 |