'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
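# A minimal sketch of the lazy-import pattern the __init__.py above relies on.
# This toy class only illustrates the idea; transformers' real _LazyModule
# also handles submodule access, __dir__, pickling, and more.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name):
        # import the heavy submodule on first attribute access, not at import time
        submodule = self._symbol_to_module.get(name)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, name)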
'''simple docstring'''
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
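# A quick round-trip sketch of the two helpers above (names restored from the
# calls inside solution(); values verified by hand):
#
#     parse_roman_numerals("XIX")   # -> 19  (X=10, then the subtractive IX=9)
#     generate_roman_numerals(19)   # -> "XIX"
#
# generate_roman_numerals always emits the minimal form, which is exactly what
# solution() exploits when counting the characters saved per line.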
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
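# Usage sketch for the context manager above. hide_cursor/show_cursor are
# forced by the calls in its body; the name `hide` for the manager itself is a
# restored guess:
#
#     with hide():
#         run_spinner()  # hypothetical long-running terminal UI work
#     # the cursor is restored even if the block raises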
'''simple docstring'''
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401


warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
'''simple docstring'''
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_len = min(end - start, max_word_len)
            for i in range(max_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
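# Worked example for add_sub_symbol (hand-traced against the loop above):
# with bert_tokens ["中", "国", "人"] and chinese_word_set {"中国"}, the window
# search finds "中国" starting at index 0, so the continuation character gets
# the WordPiece marker:
#
#     add_sub_symbol(["中", "国", "人"], {"中国"})  # -> ["中", "##国", "人"]
#
# prepare_ref() below then records the positions of those "##" tokens as the
# whole-word-masking reference ids.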
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords starting with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    # For Chinese (Ro)BERT, the best result comes from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
    # To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file that needs processing, same as the training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )

    args = parser.parse_args()
    main(args)
'''simple docstring'''
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
'''simple docstring'''
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255  # stay under the filesystem's filename-length limit
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
'''simple docstring'''
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
'''simple docstring'''
import os


def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
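# Worked example for one name (the classic one from the Project Euler 22
# statement): "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53, and at position 938
# in the sorted list it contributes 938 * 53 = 49714 to total_score.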
'''simple docstring'''
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()


# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
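# A quick illustration of the first rewrite rule in rename_keys, assuming a
# typical t5x parameter path (hand-checked with Python's re module):
#
#     re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_3/attention/query/kernel")
#     # -> "encoder/block/3/layer/attention/query/kernel"
#
# The MOE_LAYER_NAME_MAPPING substitutions then rewrite "/attention/" and
# "query" into their HF names ("/0/SelfAttention/", "q").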
GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise the PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed.",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
'''simple docstring'''
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
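# A quick cross-check of the same sliding-window idea using math.prod; the
# helper below is new illustrative code (not part of the Euler solution) and
# generalises the hard-coded window of 13 used above.
import math


def max_window_product(digits: str, window: int) -> int:
    return max(
        math.prod(int(d) for d in digits[i : i + window])
        for i in range(len(digits) - window + 1)
    )


# Per the Project Euler 8 statement, the best run of four adjacent digits in N
# is 9 * 9 * 8 * 9 = 5832, i.e. max_window_product(N, 4) == 5832.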
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be a positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
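# Sanity checks for climb_stairs (hand-verified; the counts follow the
# Fibonacci sequence, since step n is reached from step n-1 or step n-2):
#
#     climb_stairs(3)  # -> 3  (1+1+1, 1+2, 2+1)
#     climb_stairs(4)  # -> 5
#     climb_stairs(5)  # -> 8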
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
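# Usage sketch (MgpstrConfig is exposed from transformers' top-level API in
# recent releases; the defaults above mirror alibaba-damo/mgp-str-base):
#
#     from transformers import MgpstrConfig
#
#     config = MgpstrConfig()
#     print(config.max_token_length)  # 27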
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
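# Worked example from the Project Euler 55 statement: 47 is not a Lychrel
# candidate, because one reverse-and-add step already yields a palindrome:
#
#     sum_reverse(47)      # -> 47 + 74 = 121
#     is_palindrome(121)   # -> True
#
# Numbers that survive all 50 iterations without producing a palindrome are
# the ones counted by solution().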
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TFMobileBertForMaskedLM,
        TFMobileBertForMultipleChoice,
        TFMobileBertForNextSentencePrediction,
        TFMobileBertForPreTraining,
        TFMobileBertForQuestionAnswering,
        TFMobileBertForSequenceClassification,
        TFMobileBertForTokenClassification,
        TFMobileBertModel,
    )


@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
# word like '180' or '身高' or '神'
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
| 667 | 0 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
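# Merge LoRA weights from a .safetensors checkpoint directly into the base
# Stable Diffusion pipeline: for every paired up/down matrix,
# W = W0 + alpha * (lora_up @ lora_down).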
def convert( base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha ):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
            layer_infos = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
            curr_layer = pipeline.unet
# find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(layer_infos ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""", """lora_up""" ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace("""lora_up""", """lora_down""" ) )
# update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down )
# update visited list
for item in pair_keys:
        visited.append(item )
return pipeline
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime( number ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
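# Goldbach's other conjecture (Project Euler 46): every odd composite was
# claimed to be the sum of a prime and twice a square; we search for the
# smallest odd composite where no such decomposition exists.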
def compute_nums( n ) -> list[int]:
    if not isinstance(n, int ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            candidate = odd_composites[num] - 2 * i * i
            if is_prime(candidate ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
return list_nums
return []
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True ):
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """do_flip_channel_order""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
    def test_batch_feature( self ):
'''simple docstring'''
pass
    def test_call_pil( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 712 |
'''simple docstring'''
def search( list_data, key, left = 0, right = 0 ) -> int:
    right = right or len(list_data ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
        return search(list_data, key, left + 1, right - 1 )
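# Example: searching for 9 in [1, 5, 9, 12] compares both ends of the window,
# then recurses inward and returns index 2; a missing key yields -1.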
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
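# The fixtures below reset the cached deprecation warnings and stub out
# huggingface_hub so each test deterministically triggers the metric
# deprecation warning.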
@pytest.fixture
def mock_emitted_deprecation_warnings( monkeypatch ):
    monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""", set() )
@pytest.fixture
def mock_hfh( monkeypatch ):
    class MetricMock :
        def __init__( self , metric_id ):
            '''simple docstring'''
            self.id = metric_id
    class HfhMock :
        _metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
        def list_metrics( self ):
            '''simple docstring'''
            return self._metrics
monkeypatch.setattr("""datasets.inspect.huggingface_hub""", HfhMock() )
@pytest.mark.parametrize(
"""func, args""", [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] )
def test_metric_deprecation_warning( func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path ):
    if "tmp_path" in args:
        args = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args )
    with pytest.warns(FutureWarning, match="""https://huggingface.co/docs/evaluate""" ):
        func(*args )
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
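# Two behaviours under test: acquiring an already-held lock must raise Timeout
# only after the requested timeout, and over-long lock filenames must be
# truncated to a filesystem-safe length.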
def test_filelock( tmpdir ):
    locka = FileLock(str(tmpdir / """foo.lock""" ) )
    lockb = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_filename( tmpdir ):
    filename = """a""" * 10_00 + """.lock"""
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_55
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
| 667 | 0 |
'''simple docstring'''
import math
import sys
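# Minimum number of perfect squares summing to `number`, via bottom-up dynamic
# programming over answers[i - j*j] for every square j*j <= i.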
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
if number != int(UpperCAmelCase__ ):
raise ValueError("""the value of input must be a natural number""" )
if number < 0:
raise ValueError("""the value of input must not be a negative number""" )
if number == 0:
return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1, root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer )
        answers[i] = answer
return answers[number]
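# Example: for 12 the answer is 3, since 12 = 4 + 4 + 4.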
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( BaseOutput ):
    sample: torch.FloatTensor
class A__ ( nn.Module ):
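    # Convolutional encoder half of the VAE: conv_in -> down blocks -> UNet mid
    # block -> GroupNorm + SiLU -> conv_out (channels are doubled when both the
    # mean and log-variance of the posterior are predicted).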
    def __init__( self , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D",) , block_out_channels=(64,) , layers_per_block=2 , norm_num_groups=32 , act_fn="silu" , double_z=True ):
        '''simple docstring'''
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 )
        self.mid_block = None
        self.down_blocks = nn.ModuleList([] )
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=self.layers_per_block , in_channels=input_channel , out_channels=output_channel , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=None )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=None )
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=norm_num_groups , eps=1e-6 )
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1] , conv_out_channels , 3 , padding=1 )
        self.gradient_checkpointing = False
    def forward( self , x ):
        '''simple docstring'''
        sample = x
        sample = self.conv_in(sample )
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )
                return custom_forward
            # down
            if is_torch_version(""">=""" , """1.11.0""" ):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block ) , sample , use_reentrant=False )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , use_reentrant=False )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block ) , sample )
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , sample )
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample )
            # middle
            sample = self.mid_block(sample )
        # post-process
        sample = self.conv_norm_out(sample )
        sample = self.conv_act(sample )
        sample = self.conv_out(sample )
        return sample
class A__ ( nn.Module ):
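    # Convolutional decoder half of the VAE: conv_in -> UNet mid block -> up
    # blocks -> norm/activation -> conv_out, with optional spatial norm
    # conditioned on latent embeddings.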
    def __init__( self , in_channels=3 , out_channels=3 , up_block_types=("UpDecoderBlock2D",) , block_out_channels=(64,) , layers_per_block=2 , norm_num_groups=32 , act_fn="silu" , norm_type="group" ):
        '''simple docstring'''
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        temb_channels = in_channels if norm_type == """spatial""" else None
        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=act_fn , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=norm_num_groups , temb_channels=temb_channels )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=self.layers_per_block + 1 , in_channels=prev_output_channel , out_channels=output_channel , prev_output_channel=None , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=act_fn , resnet_groups=norm_num_groups , attention_head_dim=output_channel , temb_channels=temb_channels , resnet_time_scale_shift=norm_type )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0] , temb_channels )
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=norm_num_groups , eps=1e-6 )
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0] , out_channels , 3 , padding=1 )
        self.gradient_checkpointing = False
    def forward( self , z , latent_embeds=None ):
        '''simple docstring'''
        sample = z
        sample = self.conv_in(sample )
        upscale_dtype = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )
                return custom_forward
            if is_torch_version(""">=""" , """1.11.0""" ):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , latent_embeds , use_reentrant=False )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block ) , sample , latent_embeds , use_reentrant=False )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , sample , latent_embeds )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block ) , sample , latent_embeds )
        else:
            # middle
            sample = self.mid_block(sample , latent_embeds )
            sample = sample.to(upscale_dtype )
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample , latent_embeds )
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample )
        else:
            sample = self.conv_norm_out(sample , latent_embeds )
        sample = self.conv_act(sample )
        sample = self.conv_out(sample )
        return sample
class A__ ( nn.Module ):
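    # Vector-quantization layer: each latent vector is snapped to its nearest
    # codebook entry, a commitment loss is added, and gradients flow through a
    # straight-through estimator.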
    def __init__( self , n_e , vq_embed_dim , beta , remap=None , unknown_index="random" , sane_index_shape=False , legacy=True ):
        '''simple docstring'''
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        self.remap = remap
        if self.remap is not None:
            self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                f'''Using {self.unknown_index} for unknown indices.''' )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
    def remap_to_used( self , inds ):
        '''simple docstring'''
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] , -1 )
        used = self.used.to(inds )
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1 )
        unknown = match.sum(2 ) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape )
    def unmap_to_all( self , inds ):
        '''simple docstring'''
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] , -1 )
        used = self.used.to(inds )
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , inds )
        return back.reshape(ishape )
    def forward( self , z ):
        '''simple docstring'''
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0 , 2 , 3 , 1 ).contiguous()
        z_flattened = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened , self.embedding.weight ) , dim=1 )
        z_q = self.embedding(min_encoding_indices ).view(z.shape )
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            loss = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0] , -1 )  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices )
            min_encoding_indices = min_encoding_indices.reshape(-1 , 1 )  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry( self , indices , shape ):
        '''simple docstring'''
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0] , -1 )  # add batch axis
            indices = self.unmap_to_all(indices )
            indices = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices )
        if shape is not None:
            z_q = z_q.view(shape )
            # reshape back to match original input shape
            z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
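# Diagonal Gaussian over VAE latents: `parameters` holds the concatenated mean
# and log-variance; sampling uses the reparameterization trick.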
class A__ ( object ):
    def __init__( self , parameters , deterministic=False ):
        '''simple docstring'''
        self.parameters = parameters
        self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def sample( self , generator = None ) -> torch.FloatTensor:
        '''simple docstring'''
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x
    def kl( self , other=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
    def nll( self , sample , dims=[1, 2, 3] ):
        '''simple docstring'''
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
    def mode( self ):
'''simple docstring'''
return self.mean
| 667 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
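# Convert a timm DeiT checkpoint to the HuggingFace format: rename the flat
# timm keys, split the fused qkv projection into query/key/value, then verify
# the logits against the original model on a COCO image.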
def create_rename_keys( config, base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict, config, base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key( dct, old, new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name, pytorch_dump_folder_path ):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 10_00
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
# size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        config.hidden_size = 1_92
        config.intermediate_size = 7_68
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("""small""" ):
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
# Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (2_56 / 2_24) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size )
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__lowerCamelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 715 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
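# Convert an original mLUKE checkpoint to the HuggingFace format: extend the
# entity vocabulary with [MASK2], add <ent>/<ent2> special tokens, initialize
# the entity-aware attention query layers, and verify masked word and entity
# predictions.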
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size ):
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True, **metadata["""model_config"""] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="""cpu""" )["""module"""]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab["""[MASK2]"""] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken("""<ent>""", lstrip=False, rstrip=False )
    entity_token_b = AddedToken("""<ent2>""", lstrip=False, rstrip=False )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_b]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
    with open(os.path.join(pytorch_dump_folder_path, """tokenizer_config.json""" ), """r""" ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config["""tokenizer_class"""] = """MLukeTokenizer"""
    with open(os.path.join(pytorch_dump_folder_path, """tokenizer_config.json""" ), """w""" ) as f:
        json.dump(tokenizer_config, f )
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
        json.dump(entity_vocab, f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
# Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
    word_emb = state_dict["""embeddings.word_embeddings.weight"""]
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    enta_emb = word_emb[enta_init_index].unsqueeze(0 )
    state_dict["""embeddings.word_embeddings.weight"""] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'''encoder.layer.{layer_index}.attention.self.'''
            state_dict[prefix + """w2e_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2w_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2e_""" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    entity_mask_emb = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
    state_dict["""entity_embeddings.entity_embeddings.weight"""] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["""entity_predictions.bias"""]
    entity_mask_bias = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
    state_dict["""entity_predictions.bias"""] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
            state_dict_for_hugging_face[F'''luke.{key}'''] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    state_dict = state_dict_for_hugging_face
    missing_keys , unexpected_keys = model.load_state_dict(state_dict, strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(missing_keys ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="""entity_classification""" )
    text = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="""pt""" )
    outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 33, 7_68) )
        expected_slice = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 1, 7_68) )
        expected_slice = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = """Tokyo is the capital of <mask>."""
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="""pt""" )
    outputs = model(**encoding )
    input_ids = encoding["""input_ids"""][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab( entity_vocab_path ):
    SPECIAL_TOKENS = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["""id"""]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = F'''{language}:{entity_name}'''
            new_mapping[new_entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 667 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class A__ ( PretrainedConfig ):
    model_type = "camembert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A__ ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 716 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( ProcessorMixin ):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
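    # Bundles a CLAP feature extractor and a RoBERTa tokenizer behind a single
    # __call__ that accepts text, audio, or both.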
    def __init__( self , feature_extractor , tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        if text is None and audios is None:
            raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding["""input_features"""] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
    def model_input_names( self ):
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 667 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
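# Information Gain Filtration (IGF): collect (context, information-gain) pairs,
# train a secondary learner on them, then fine-tune GPT-2 while that learner
# filters out uninformative training contexts.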
def generate_n_pairs( context_len=32, max_steps=10, size_objective_set=1_00, min_len=10_26, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
    set_seed(3 )
    # generate train_data and objective_set
    train_data , objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=10_26, trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
    # load pretrained model
    model = load_gpta("""gpt2""" ).to(device )
    print("""computing perplexity on objective set""" )
    orig_perp = compute_perplexity(model, objective_set, context_len ).item()
    print("""perplexity on objective set:""", orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=1_28, eval_freq=1_00, igf_model_path="igf_model.pt", ):
    set_seed(42 )
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("""gpt2""" )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=1_00, igf_model_path=igf_model_path, )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune( model, train_dataset, test_dataset, context_len=32, max_steps=10_00, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
    device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device )
    model , lm_optimizer , lm_scheduler = recopy_model(model, device, max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len )
    test_perps.append(real_perp )
    print("""Test perplexity, step""", global_step, """:""", real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2 ) - context_len - 1 )
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len )
                    test_perps.append(real_perp )
                    print("""Test perplexity, step""", global_step, """:""", real_perp )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
A_ = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""", default=UpperCAmelCase__, type=UpperCAmelCase__, required=UpperCAmelCase__, help="""The input data dir. Should contain data files for WikiText.""", )
parser.add_argument(
"""--model_name_or_path""", default=UpperCAmelCase__, type=UpperCAmelCase__, required=UpperCAmelCase__, help="""Path to pretrained model or model identifier from huggingface.co/models""", )
parser.add_argument(
"""--data_file""", type=UpperCAmelCase__, default=UpperCAmelCase__, help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
), )
parser.add_argument(
"""--igf_data_file""", type=UpperCAmelCase__, default=UpperCAmelCase__, help="""A jbl file containing the context and information gain pairs to train secondary learner.""", )
parser.add_argument(
"""--output_dir""", default=UpperCAmelCase__, type=UpperCAmelCase__, required=UpperCAmelCase__, help="""The output directory where the final fine-tuned model is stored.""", )
parser.add_argument(
"""--tokenizer_name""", default=UpperCAmelCase__, type=UpperCAmelCase__, help="""Pretrained tokenizer name or path if not the same as model_name""", )
parser.add_argument("""--seed""", type=UpperCAmelCase__, default=UpperCAmelCase__, help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""", default=32, type=UpperCAmelCase__, help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
), )
parser.add_argument(
"""--size_objective_set""", default=1_00, type=UpperCAmelCase__, help="""number of articles that are long enough to be used as our objective set""", )
parser.add_argument(
"""--eval_freq""", default=1_00, type=UpperCAmelCase__, help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""", default=10_00, type=UpperCAmelCase__, help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""", default=1_28, type=UpperCAmelCase__, help="""batch size of training data for secondary learner""", )
parser.add_argument(
"""--batch_size""", default=16, type=UpperCAmelCase__, help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""", default=10, type=UpperCAmelCase__, help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
), )
parser.add_argument(
"""--number""", default=1_00, type=UpperCAmelCase__, help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""", default=10_26, type=UpperCAmelCase__, help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""", default=15, type=UpperCAmelCase__, help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""", default=UpperCAmelCase__, type=UpperCAmelCase__, help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""", default=1.0, type=UpperCAmelCase__, help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
), )
parser.add_argument("""--finetuned_model_name""", default="""gpt2_finetuned.pt""", type=UpperCAmelCase__, help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""", default=UpperCAmelCase__, type=UpperCAmelCase__, help="""Reset the model to the original pretrained GPT-2 weights after each iteration""", )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=1_00, min_len=10_26, trim=True, data_file="""data/tokenized_stories_train_wikitext103.jbl""", igf_data_file="""igf_context_pairs.jbl""", )
# Load train data for secondary learner
    secondary_learner_train_data = joblib.load("""data/IGF_values.jbl""" )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=1_28, eval_freq=1_00, igf_model_path="""igf_model.pt""", )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("""gpt2""" )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset , test_dataset = generate_datasets(
        context_len=32, file="""data/tokenized_stories_train_wikitext103.jbl""", number=1_00, min_len=10_26, trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=10_00, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="""gpt2_finetuned.pt""", )
if __name__ == "__main__":
main()
| 717 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
gray = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
    negative_img = cn.convert_to_negative(img )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
        assert str(cc.change_contrast(img, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# laplace diagonals
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
| 667 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=100 , UpperCamelCase__=13 , UpperCamelCase__=30 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , ) -> Union[str, Any]:
'''simple docstring'''
A_ = parent
A_ = vocab_size
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = FlaxBeitModel(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = FlaxBeitForMaskedImageModeling(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = self.type_sequence_label_size
A_ = FlaxBeitForImageClassification(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ = 1
A_ = FlaxBeitForImageClassification(UpperCamelCase__ )
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(UpperCamelCase__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class A__ ( _snake_case , unittest.TestCase ):
lowercase = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
    def setUp ( self ) -> None:
        '''simple docstring'''
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCamelCase__ )
A_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
A_ = model_class(UpperCamelCase__ )
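                # Run the model once under jax.jit and once eagerly; the two paths must agree shape-for-shape.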
@jax.jit
def model_jitted(UpperCamelCase__ , **UpperCamelCase__ ):
return model(pixel_values=UpperCamelCase__ , **UpperCamelCase__ )
with self.subTest("""JIT Enabled""" ):
A_ = model_jitted(**UpperCamelCase__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
A_ = model_jitted(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> str:
'''simple docstring'''
for model_class_name in self.all_model_classes:
A_ = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
A_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase__ ( ) -> List[str]:
A_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@require_flax
class A__ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCamelCase__ , return_tensors="""np""" ).pixel_values
# prepare bool_masked_pos
A_ = np.ones((1, 196) , dtype=UpperCamelCase__ )
# forward pass
A_ = model(pixel_values=UpperCamelCase__ , bool_masked_pos=UpperCamelCase__ )
A_ = outputs.logits
# verify the logits
A_ = (1, 196, 8192)
self.assertEqual(logits.shape , UpperCamelCase__ )
A_ = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase__ , atol=1e-2 ) )
@slow
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCamelCase__ , return_tensors="""np""" )
# forward pass
A_ = model(**UpperCamelCase__ )
A_ = outputs.logits
# verify the logits
A_ = (1, 1000)
self.assertEqual(logits.shape , UpperCamelCase__ )
A_ = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
A_ = 281
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCamelCase__ , return_tensors="""np""" )
# forward pass
A_ = model(**UpperCamelCase__ )
A_ = outputs.logits
# verify the logits
A_ = (1, 21841)
self.assertEqual(logits.shape , UpperCamelCase__ )
A_ = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
A_ = 2396
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ )
| 718 |
'''simple docstring'''
def UpperCAmelCase__ ( point_a, point_b ) -> float:
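    # Doctest examples; these run via the doctest.testmod() call at the bottom of the file.
    """
    Manhattan (taxicab) distance between two points in n-dimensional space.

    >>> UpperCAmelCase__([1, 1], [2, 2])
    2.0
    >>> UpperCAmelCase__([1.5, 1.5], [2, 2])
    1.0
    """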
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(a - b ) for a, b in zip(point_a, point_b ) ) )
def _validate_point ( point ) -> None:
    if point:
        if isinstance(point, list ):
            for item in point:
                if not isinstance(item, (int, float) ):
                    A_ = (
                        """Expected a list of numbers as input, found """
                        F'''{type(item ).__name__}'''
                    )
                    raise TypeError(A_ )
        else:
            A_ = F'''Expected a list of numbers as input, found {type(point ).__name__}'''
            raise TypeError(A_ )
    else:
        raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( point_a, point_b ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(x - y ) for x, y in zip(point_a, point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
def get_mobilenet_va_config ( model_name ) -> Union[str, Any]:
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name )
    if matches:
        # group 1 is the depth multiplier (e.g. "1.0"), group 2 is the input image size (e.g. "224")
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 10_01
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def prepare_img ( ) -> int:
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilenet_va_checkpoint ( model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False ) -> Optional[Any]:
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": config.image_size + 32}, )
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 10_01)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''mobilenet_v1_1.0_224''',
type=str,
        help='''Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.''',
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_mobilenet_va_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
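# Example invocation (the script file name and local paths below are illustrative, not from the source):
#
#   python convert_mobilenet_v1_checkpoint.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf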
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=30 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=None , ) -> Dict:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = image_size
A_ = patch_size
A_ = num_channels
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ = (image_size // patch_size) ** 2
A_ = num_patches + 1
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = ViTMSNModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = self.type_sequence_label_size
A_ = ViTMSNForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ = 1
A_ = ViTMSNForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
A_ , A_ , A_ = config_and_inputs
A_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowercase = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
    def setUp ( self ) -> Dict:
        '''simple docstring'''
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=37 )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCamelCase__ )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = ViTMSNModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase__ ( ) -> Optional[Any]:
A_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def snake_case_ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(2 )
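        # The MSN checkpoint has no fine-tuned classification head, so seed torch to make the
        # randomly initialized head (and hence the expected logits) deterministic.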
A_ = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(UpperCamelCase__ )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A_ = model(**UpperCamelCase__ )
# verify the logits
A_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A_ = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 720 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
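    # Doctest examples; these run via the doctest.testmod() call below.
    """
    Return True if a non-negative integer reads the same forwards and backwards.

    >>> UpperCAmelCase__(121)
    True
    >>> UpperCAmelCase__(-121)
    False
    >>> UpperCAmelCase__(10)
    False
    """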
if num < 0:
return False
A_ = num
A_ = 0
while num > 0:
A_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType ( enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(_snake_case )
class A__ ( _snake_case ):
lowercase = "generated"
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def snake_case_ ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
A_ = {}
if truncation is not None:
A_ = truncation
A_ = generate_kwargs
A_ = {}
if return_tensors is not None and return_type is None:
A_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
A_ = return_type
if clean_up_tokenization_spaces is not None:
A_ = clean_up_tokenization_spaces
if stop_sequence is not None:
A_ = self.tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
A_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return True
def snake_case_ ( self , *UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , UpperCamelCase__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
A_ = ([prefix + arg for arg in args[0]],)
A_ = True
elif isinstance(args[0] , UpperCamelCase__ ):
A_ = (prefix + args[0],)
A_ = False
else:
raise ValueError(
                f''' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`''' )
A_ = self.tokenizer(*UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors=self.framework )
        # token_type_ids is produced by some tokenizers but is not a valid kwarg for generate()
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
if (
isinstance(args[0] , UpperCamelCase__ )
and all(isinstance(UpperCamelCase__ , UpperCamelCase__ ) for el in args[0] )
and all(len(UpperCamelCase__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self._parse_and_tokenize(UpperCamelCase__ , truncation=UpperCamelCase__ , **UpperCamelCase__ )
return inputs
def snake_case_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
if self.framework == "pt":
A_ , A_ = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
A_ , A_ = tf.shape(model_inputs["""input_ids"""] ).numpy()
A_ = generate_kwargs.get("""min_length""" , self.model.config.min_length )
A_ = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(UpperCamelCase__ , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
A_ = self.model.generate(**UpperCamelCase__ , **UpperCamelCase__ )
A_ = output_ids.shape[0]
if self.framework == "pt":
A_ = output_ids.reshape(UpperCamelCase__ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
A_ = tf.reshape(UpperCamelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=ReturnType.TEXT , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
A_ = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
A_ = {
f'''{self.return_name}_text''': self.tokenizer.decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ , )
}
records.append(UpperCamelCase__ )
return records
@add_end_docstrings(_snake_case )
class A__ ( _snake_case ):
lowercase = "summary"
def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> bool:
'''simple docstring'''
if max_length < min_length:
            logger.warning(f'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(_snake_case )
class A__ ( _snake_case ):
lowercase = "translation"
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def snake_case_ ( self , *UpperCamelCase__ , UpperCamelCase__=TruncationStrategy.DO_NOT_TRUNCATE , UpperCamelCase__=None , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , UpperCamelCase__ ):
return self.tokenizer._build_translation_inputs(
*UpperCamelCase__ , return_tensors=self.framework , truncation=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ )
else:
return super()._parse_and_tokenize(*UpperCamelCase__ , truncation=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ , A_ , A_ = super()._sanitize_parameters(**UpperCamelCase__ )
if src_lang is not None:
A_ = src_lang
if tgt_lang is not None:
A_ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
A_ = kwargs.get("""task""" , self.task )
A_ = task.split("""_""" )
if task and len(UpperCamelCase__ ) == 4:
# translation, XX, to YY
A_ = items[1]
A_ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
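# A minimal usage sketch for the pipelines above (assumes the standard `pipeline` factory from transformers):
#
#   from transformers import pipeline
#
#   summarizer = pipeline("summarization")
#   summarizer("A long article ...", max_length=60, min_length=10)
#
#   translator = pipeline("translation_en_to_fr")
#   translator("How old are you?")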
| 721 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class A__ ( _snake_case ):
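    # Concrete commands implement the two hooks below: the static one registers the command's
    # arguments on the shared ArgumentParser, the instance one executes the command.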
@staticmethod
@abstractmethod
def snake_case_ ( UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
| 700 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 , UpperCamelCase__=False , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
    def build ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
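    # Map hidden states from the projection size up to the embedding size (when a projection is
    # given), then score them against an output embedding matrix.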
@staticmethod
    def _logit ( x , W , b , proj=None ) -> List[Any]:
        '''simple docstring'''
        y = x
        if proj is not None:
            y = tf.einsum("""ibd,ed->ibe""" , y , proj )
        return tf.einsum("""ibd,nd->ibn""" , y , W ) + b
@staticmethod
    def _gather_logprob ( logprob , target ) -> List[str]:
        '''simple docstring'''
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
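    # Adaptive softmax forward pass: the shortlist head also emits one logit per tail cluster, and a
    # tail token's log-probability is its cluster's head log-prob plus its within-cluster log-prob.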
    def call ( self , hidden , target , return_mean=True , training=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
# Log the loss as a metric (we could log arbitrary metrics,
        # including different metrics for training and inference).
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
| 667 | 0 |
'''simple docstring'''
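# Auto-generated dummy objects: each placeholder stands in for a torch-backed class and raises an
# informative error, via requires_backends, when used without PyTorch installed.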
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
def UpperCAmelCase__ ( *UpperCAmelCase__, **UpperCAmelCase__ ) -> int:
requires_backends(UpperCAmelCase__, ["""torch"""] )
def UpperCAmelCase__ ( *UpperCAmelCase__, **UpperCAmelCase__ ) -> List[str]:
requires_backends(UpperCAmelCase__, ["""torch"""] )
def UpperCAmelCase__ ( *UpperCAmelCase__, **UpperCAmelCase__ ) -> int:
requires_backends(UpperCAmelCase__, ["""torch"""] )
def UpperCAmelCase__ ( *UpperCAmelCase__, **UpperCAmelCase__ ) -> str:
requires_backends(UpperCAmelCase__, ["""torch"""] )
def UpperCAmelCase__ ( *UpperCAmelCase__, **UpperCAmelCase__ ) -> int:
requires_backends(UpperCAmelCase__, ["""torch"""] )
def UpperCAmelCase__ ( *UpperCAmelCase__, **UpperCAmelCase__ ) -> Any:
requires_backends(UpperCAmelCase__, ["""torch"""] )
def UpperCAmelCase__ ( *UpperCAmelCase__, **UpperCAmelCase__ ) -> Any:
requires_backends(UpperCAmelCase__, ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
class A__ ( metaclass=_snake_case ):
lowercase = ["torch"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
@classmethod
def snake_case_ ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ["""torch"""] )
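# --- Illustration (added, not from the original file): what these dummy stubs
# accomplish, in miniature. When the torch backend is missing, each public class is
# replaced by a placeholder whose instantiation raises a helpful error instead of
# failing at import time. This is a simplified sketch with hypothetical names, not
# the library's actual DummyObject/requires_backends implementation.
class _MissingBackendMeta(type):
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the torch backend: pip install torch")

class StubModel(metaclass=_MissingBackendMeta):
    pass

# StubModel() -> ImportError: StubModel requires the torch backend: pip install torch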
| 701 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
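# --- Illustration (added, not from the original file): a minimal, self-contained
# sketch of the bidirectional search above, with readable (hypothetical) names.
# Dijkstra runs from both ends; once the two frontiers' best keys sum past the best
# meeting point found so far, the answer is final.
from heapq import heappop, heappush
from math import inf

def bidirectional_shortest_path(graph_fwd, graph_bwd, source, target):
    dist = [{source: 0}, {target: 0}]
    heaps = [[(0, source)], [(0, target)]]
    seen = [set(), set()]
    best = inf
    while heaps[0] and heaps[1]:
        for side, other in ((0, 1), (1, 0)):
            d, v = heappop(heaps[side])
            seen[side].add(v)
            graph = graph_fwd if side == 0 else graph_bwd
            for nxt, w in graph.get(v, []):
                nd = d + w
                if nd < dist[side].get(nxt, inf):
                    dist[side][nxt] = nd
                    heappush(heaps[side], (nd, nxt))
                if nxt in seen[other]:
                    best = min(best, nd + dist[other][nxt])
        if heaps[0] and heaps[1] and heaps[0][0][0] + heaps[1][0][0] >= best:
            break
    return best

# Same topology as the example graphs above: E -> G -> F (cost 3) beats
# E -> B -> C -> D -> F (cost 4).
_fwd = {"B": [("C", 1)], "C": [("D", 1)], "D": [("F", 1)],
        "E": [("B", 1), ("G", 2)], "F": [], "G": [("F", 1)]}
_bwd = {"B": [("E", 1)], "C": [("B", 1)], "D": [("C", 1)],
        "F": [("D", 1), ("G", 1)], "E": [], "G": [("E", 2)]}
assert bidirectional_shortest_path(_fwd, _bwd, "E", "F") == 3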
| 667 | 0 |
'''simple docstring'''
import argparse
import copy
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = {}
with open(UpperCAmelCase__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
A_ = []
_list.append([line.split()[1], line.split()[2]] )
A_ = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
A_ = []
_list.append([line.split()[0], line.split()[2]] )
A_ = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
with open(UpperCAmelCase__ ) as f:
A_ = f.read(1 )
A_ = start_node
A_ = []
A_ = start_node
A_ = 0
while visiting not in first_solution:
A_ = 1_00_00
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(UpperCAmelCase__ ) and k[0] not in first_solution:
A_ = k[1]
A_ = k[0]
first_solution.append(UpperCAmelCase__ )
A_ = distance_of_first_solution + int(UpperCAmelCase__ )
A_ = best_node
first_solution.append(UpperCAmelCase__ )
A_ = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
A_ = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_00_00
)
return first_solution, distance_of_first_solution
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for n in solution[1:-1]:
A_ = solution.index(UpperCAmelCase__ )
for kn in solution[1:-1]:
A_ = solution.index(UpperCAmelCase__ )
if n == kn:
continue
A_ = copy.deepcopy(UpperCAmelCase__ )
A_ = kn
A_ = n
A_ = 0
for k in _tmp[:-1]:
A_ = _tmp[_tmp.index(UpperCAmelCase__ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
A_ = distance + int(i[1] )
_tmp.append(UpperCAmelCase__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
A_ = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda UpperCAmelCase__ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]:
A_ = 1
A_ = first_solution
A_ = []
A_ = distance_of_first_solution
A_ = solution
while count <= iters:
A_ = find_neighborhood(UpperCAmelCase__, UpperCAmelCase__ )
A_ = 0
A_ = neighborhood[index_of_best_solution]
A_ = len(UpperCAmelCase__ ) - 1
A_ = False
while not found:
A_ = 0
while i < len(UpperCAmelCase__ ):
if best_solution[i] != solution[i]:
A_ = best_solution[i]
A_ = solution[i]
break
A_ = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
A_ = True
A_ = best_solution[:-1]
A_ = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
A_ = cost
A_ = solution
else:
A_ = index_of_best_solution + 1
A_ = neighborhood[index_of_best_solution]
if len(UpperCAmelCase__ ) >= size:
tabu_list.pop(0 )
A_ = count + 1
return best_solution_ever, best_cost
def UpperCAmelCase__ ( UpperCAmelCase__=None ) -> Dict:
A_ = generate_neighbours(args.File )
A_ , A_ = generate_first_solution(
args.File, UpperCAmelCase__ )
A_ , A_ = tabu_search(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, args.Iterations, args.Size, )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
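# --- Illustration (added, not from the original file): the 2-swap ("exchange")
# neighbourhood that the find_neighborhood routine above enumerates, shown on a toy
# tour. The function name and the sample tour are hypothetical; endpoints stay
# fixed because the tour must start and end at the depot.
from itertools import combinations

def two_swap_neighbors(tour):
    neighbors = []
    for i, j in combinations(range(1, len(tour) - 1), 2):
        candidate = tour.copy()
        candidate[i], candidate[j] = candidate[j], candidate[i]
        neighbors.append(candidate)
    return neighbors

print(two_swap_neighbors(["a", "b", "c", "d", "a"]))
# [['a', 'c', 'b', 'd', 'a'], ['a', 'd', 'c', 'b', 'a'], ['a', 'b', 'd', 'c', 'a']]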
| 702 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
with open(os.path.dirname(UpperCAmelCase__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
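# --- Illustration (added, not from the original file): the deferred-import
# behaviour _LazyModule provides, in miniature. This simplified sketch resolves an
# attribute to its submodule only on first access; the real implementation also
# handles caching, __dir__, and error reporting.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule[attr]
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)

lazy_json = TinyLazyModule("json", {"decoder": ["JSONDecoder"]})
print(lazy_json.JSONDecoder)  # json.decoder is imported on this line, not before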
| 703 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> np.array:
A_ = int(np.ceil((x_end - xa) / step_size ) )
A_ = np.zeros((n + 1,) )
A_ = ya
A_ = xa
for k in range(UpperCAmelCase__ ):
A_ = y[k] + step_size * ode_func(UpperCAmelCase__, y[k] )
A_ = y[k] + (
(step_size / 2) * (ode_func(UpperCAmelCase__, y[k] ) + ode_func(x + step_size, UpperCAmelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
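# --- Illustration (added, not from the original file): one concrete run of the
# predictor-corrector above on y' = y, y(0) = 1, whose exact solution is e^x. The
# step below restates the two update lines of the function in standalone form
# (hypothetical names).
import math

def heun_step(f, x, y, h):
    y_pred = y + h * f(x, y)                            # Euler predictor
    return y + (h / 2) * (f(x, y) + f(x + h, y_pred))   # trapezoidal corrector

x, y, h = 0.0, 1.0, 0.1
for _ in range(10):  # integrate from x = 0 to x = 1
    y = heun_step(lambda s, t: t, x, y, h)
    x += h
print(y, math.e)  # ~2.7141 vs 2.7183 -> error ~4e-3 (plain Euler gives ~2.5937)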
| 704 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 0 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
__lowerCamelCase = logging.get_logger(__name__)
class A__ :
lowercase = None
@experimental
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return _map_with_joblib(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]:
A_ = num_proc if num_proc <= len(UpperCAmelCase__ ) else len(UpperCAmelCase__ )
    A_ = []  # We organize the splits ourselves (contiguous splits)
for index in range(UpperCAmelCase__ ):
A_ = len(UpperCAmelCase__ ) // num_proc
A_ = len(UpperCAmelCase__ ) % num_proc
A_ = div * index + min(UpperCAmelCase__, UpperCAmelCase__ )
A_ = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(UpperCAmelCase__ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F'''Error dividing inputs iterable among processes. '''
F'''Total number of objects {len(UpperCAmelCase__ )}, '''
F'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
logger.info(
F'''Spawning {num_proc} processes for {len(UpperCAmelCase__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
A_ , A_ = None, None
if not disable_tqdm:
A_ , A_ = (RLock(),), tqdm.set_lock
with Pool(UpperCAmelCase__, initargs=UpperCAmelCase__, initializer=UpperCAmelCase__ ) as pool:
A_ = pool.map(UpperCAmelCase__, UpperCAmelCase__ )
logger.info(F'''Finished {num_proc} processes''' )
A_ = [obj for proc_res in mapped for obj in proc_res]
logger.info(F'''Unpacked {len(UpperCAmelCase__ )} objects''' )
return mapped
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Any:
    # A progress bar is not yet supported for _map_with_joblib: tqdm cannot be applied
    # accurately to joblib, and doing so would require monkey-patching joblib internal
    # classes, which are subject to change.
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=UpperCAmelCase__ ):
return joblib.Parallel()(
joblib.delayed(UpperCAmelCase__ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
A_ = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
A_ = None
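# --- Illustration (added, not from the original file): the contiguous-split
# arithmetic used by _map_with_multiprocessing_pool above, standalone. divmod
# spreads the remainder over the first `mod` slices, so slice lengths differ by at
# most one item.
def contiguous_splits(n_items, n_proc):
    div, mod = divmod(n_items, n_proc)
    return [
        (div * index + min(index, mod), div * (index + 1) + min(index + 1, mod))
        for index in range(n_proc)
    ]

print(contiguous_splits(10, 3))  # [(0, 4), (4, 7), (7, 10)]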
| 705 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = """hf-internal-testing/tiny-random-t5"""
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
A_ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
A_ = tokenizer("""This is me""" , return_tensors="""pt""" )
A_ = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
A_ = model.generate(**UpperCamelCase__ )
A_ = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
A_ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
A_ = model_reloaded.generate(**UpperCamelCase__ )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = """hf-internal-testing/tiny-random-t5"""
A_ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
A_ = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(UpperCamelCase__ ):
model.save_pretrained(UpperCamelCase__ )
A_ = model.reverse_bettertransformer()
model.save_pretrained(UpperCamelCase__ )
| 706 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''ConvNextFeatureExtractor''']
__lowerCamelCase = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 707 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
A_ = expert_weihts[idx]
print(F'''{key} -> {key.replace("expert/", "nested fstring" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
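# --- Illustration (added, not from the original file): the first renaming rules
# above applied to one sample checkpoint key, standalone. The key itself is
# hypothetical.
import re

key = "encoder/layers_3/mlp/wi/kernel"
key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)   # layer index -> block path
key = re.sub(r"/mlp/", r"/1/mlp/", key)                 # encoder MLPs sit in slot 1
print(key)  # encoder/block/3/layer/1/mlp/wi/kernel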
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(UpperCAmelCase__, n - 1, UpperCAmelCase__ ) * a) % mod
else:
        A_ = binary_exponentiation(UpperCAmelCase__, n // 2, UpperCAmelCase__ )  # floor division keeps n an int
return (b * b) % mod
# a prime number
__lowerCamelCase = 701
__lowerCamelCase = 10_0000_0000
__lowerCamelCase = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
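# --- Illustration (added, not from the original file): the same Fermat identity
# with small concrete numbers. For prime p, b ** (p - 2) is the multiplicative
# inverse of b modulo p.
_p, _b = 7, 3
_inv = pow(_b, _p - 2, _p)  # 3 ** 5 % 7 == 5
assert (_b * _inv) % _p == 1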
| 708 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
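# --- Illustration (added, not from the original file): a readable restatement of
# the iteration above. The counts follow the Fibonacci recurrence, since the last
# move is either one step (from n - 1) or two steps (from n - 2).
def climb_stairs(n):
    current, previous = 1, 1
    for _ in range(n - 1):
        current, previous = current + previous, current
    return current

print([climb_stairs(n) for n in range(1, 7)])  # [1, 2, 3, 5, 8, 13]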
| 667 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class A__ ( _snake_case ):
@require_torch
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
A_ = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
A_ = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
A_ = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(UpperCamelCase__ )
BertModel.from_pretrained(UpperCamelCase__ )
BertTokenizer.from_pretrained(UpperCamelCase__ )
pipeline(task="""fill-mask""" , model=UpperCamelCase__ )
# baseline - just load from_pretrained with normal network
A_ = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
A_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ = """1"""
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
A_ = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
A_ = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
A_ = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(UpperCamelCase__ )
BertModel.from_pretrained(UpperCamelCase__ )
BertTokenizer.from_pretrained(UpperCamelCase__ )
pipeline(task="""fill-mask""" , model=UpperCamelCase__ )
# baseline - just load from_pretrained with normal network
A_ = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
A_ = self.get_env()
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
A_ = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
A_ = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
A_ = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
A_ = self.get_env()
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
A_ = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ = """1"""
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """
from transformers import pipeline
"""
A_ = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
A_ = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
A_ = self.get_env()
A_ = """1"""
A_ = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = """
from transformers import AutoModel
"""
A_ = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
A_ = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
A_ = self.get_env()
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
A_ = """1"""
A_ = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 709 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
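def _demo_lychrel() -> None:
    # A minimal sketch exercising the reconstructed helpers above: 47 + 74 = 121,
    # a palindrome, so 47 is resolved in one iteration and is not a Lychrel number.
    assert sum_reverse(47) == 121
    assert is_palindrome(121)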
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(
    resistance: float, reactance: float, impedance: float
) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            span = min(end - start, max_word_len)
            for i in range(span, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
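def _demo_cjk_helpers() -> None:
    # Quick sketch of the helpers above: CJK ideographs pass the range check,
    # ASCII digits do not.
    assert _is_chinese_char(ord("神"))
    assert is_chinese("身高") == 1
    assert is_chinese("180") == 0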
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm).
    # If we want to fine-tune this model, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of the curve as linear and solves
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
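def _demo_trapezoid() -> None:
    # Sanity sketch for the rule above: the exact integral of x^2 on [0, 1]
    # is 1/3, and 1_000 trapezoids land well within 1e-3 of it.
    approx = trapezoidal_area(lambda x: x * x, 0, 1, 1_000)
    assert abs(approx - 1 / 3) < 1e-3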
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]
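def _demo_goldbach() -> None:
    # Known result for this problem: 5777 is the smallest odd composite that
    # cannot be written as a prime plus twice a square.
    assert solution() == 5777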
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
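def _demo_shifts() -> None:
    # Sketch of the three shifts above: 10 is 0b1010, and the arithmetic right
    # shift keeps the sign bit of the two's-complement form (-8 is 11000 in
    # 5 bits, so -8 >> 2 is 11110, i.e. -2).
    assert logical_left_shift(10, 2) == "0b101000"
    assert logical_right_shift(10, 2) == "0b10"
    assert arithmetic_right_shift(-8, 2) == "0b11110"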
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
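def _demo_search() -> None:
    # The recursion probes both ends and narrows inward, so each call
    # eliminates two candidate positions.
    assert search([1, 3, 5, 7, 9], 7) == 3
    assert search([1, 3, 5, 7, 9], 4) == -1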
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
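def test_lock_reentrancy_sketch(tmpdir):
    # Hedged extra check, not part of the original file: this FileLock is
    # reentrant, so re-acquiring it in the same process must not time out.
    lock = FileLock(str(tmpdir / "bar.lock"))
    with lock.acquire():
        with lock.acquire(timeout=0.01):
            assert lock.is_locked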
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=3 , UpperCamelCase__=32 , UpperCamelCase__=3 , UpperCamelCase__=10 , UpperCamelCase__=[10, 20, 30, 40] , UpperCamelCase__=[1, 1, 2, 1] , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=3 , UpperCamelCase__=None , ) -> Union[str, Any]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = image_size
A_ = num_channels
A_ = embeddings_size
A_ = hidden_sizes
A_ = depths
A_ = is_training
A_ = use_labels
A_ = hidden_act
A_ = num_labels
A_ = scope
A_ = len(UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.num_labels )
A_ = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self ) -> int:
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = TFResNetModel(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = TFResNetForImageClassification(UpperCamelCase__ )
A_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
A_ , A_ , A_ = config_and_inputs
A_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = TFResNetModelTester(self )
A_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def snake_case_ ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCamelCase__ )
A_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A_ = model_class(UpperCamelCase__ )
A_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ = layer_type
A_ = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFResNetModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase__ ( ) -> str:
A_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" )
# forward pass
A_ = model(**UpperCamelCase__ )
# verify the logits
A_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A_ = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCamelCase__ , atol=1e-4 ) )
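# Shape sketch for the integration test above (assumes the stock ImageNet head):
# ResNet's overall stride is 32, so a 224x224 input yields a 7x7 final feature
# map, while the classifier emits one logit per class, hence logits of (1, 1000).
assert 224 // 32 == 7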
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
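def _nearest_codebook_sketch() -> None:
    # Standalone sketch of the nearest-neighbour lookup used by the quantizer
    # above: ||z - e||^2 = ||z||^2 + ||e||^2 - 2 z.e, and torch.cdist gives the
    # pairwise distances whose per-row argmin selects the codebook entry.
    z = torch.randn(5, 4)  # five flattened latents of dim 4
    codebook = torch.randn(8, 4)  # eight codebook entries
    indices = torch.argmin(torch.cdist(z, codebook), dim=1)
    quantized = codebook[indices]
    assert quantized.shape == z.shape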
| 667 | 0 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
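def _roundtrip_sketch() -> None:
    # Round-trip sketch for the reconstructed helpers above: base85 must
    # invert cleanly on arbitrary UTF-8 text.
    assert base85_decode(base85_encode("sanity ✓")) == "sanity ✓"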
| 715 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
A_ = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
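def _append_row_sketch() -> None:
    # Minimal sketch of the "append one special-token row" pattern used above,
    # with plain tensors standing in for the checkpoint weights.
    entity_emb = torch.randn(10, 4)
    mask_row = entity_emb[3].unsqueeze(0)  # pretend index 3 is [MASK]
    assert torch.cat([entity_emb, mask_row]).shape == (11, 4)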
| 667 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
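# Example invocation (model identifiers are illustrative):
#   python consolidate_rag_checkpoint.py \
#     --model_type rag_sequence \
#     --generator_name_or_path facebook/bart-large-cnn \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#     --dest ./rag-checkpoint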
| 716 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        sampling_rate = kwargs.pop("sampling_rate", None)
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
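# Sketch of the de-duplication idiom in model_input_names above: dict.fromkeys
# keeps first-seen order while dropping repeats.
assert list(dict.fromkeys(["input_ids", "attention_mask", "input_features", "attention_mask"])) == [
    "input_ids",
    "attention_mask",
    "input_features",
]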
| 667 | 0 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = 0
@slow
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(UpperCamelCase__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(UpperCamelCase__ ) , 0 )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = AutoConfig.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
# Check that tokenizer_type ≠ model_type
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , config=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase__ , """vocab.txt""" ) )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""bert""" , use_fast=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase__ , """merges.txt""" ) )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""gpt2""" , use_fast=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
@require_tokenizers
def snake_case_ ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(UpperCamelCase__ , """vocab.txt""" ) )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""bert""" )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(UpperCamelCase__ , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(UpperCamelCase__ , """merges.txt""" ) )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , tokenizer_type="""gpt2""" )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
with pytest.raises(UpperCamelCase__ ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A_ = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase__ )
else:
self.assertEqual(tokenizer.do_lower_case , UpperCamelCase__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
UpperCamelCase__ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
A_ = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = TOKENIZER_MAPPING.values()
A_ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(UpperCamelCase__ )
@require_tokenizers
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=UpperCamelCase__ ) , UpperCamelCase__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , UpperCamelCase__ )
@require_tokenizers
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=UpperCamelCase__ )
A_ = """Hello, world. How are you?"""
A_ = tokenizer.tokenize(UpperCamelCase__ )
self.assertEqual("""[UNK]""" , tokens[0] )
A_ = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=UpperCamelCase__ )
A_ = tokenizer.tokenize(UpperCamelCase__ )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = get_tokenizer_config("""bert-base-cased""" )
A_ = config.pop("""_commit_hash""" , UpperCamelCase__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(UpperCamelCase__ , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A_ = get_tokenizer_config(UpperCamelCase__ )
self.assertDictEqual(UpperCamelCase__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = get_tokenizer_config(UpperCamelCase__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def snake_case_ ( self ) -> str:
'''simple docstring'''
try:
AutoConfig.register("""custom""" , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
A_ = CustomTokenizer.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
try:
AutoConfig.register("""custom""" , UpperCamelCase__ )
# Can register in two steps
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase__ ):
AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
        # We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
        # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = BertTokenizerFast.from_pretrained(UpperCamelCase__ )
bert_tokenizer.save_pretrained(UpperCamelCase__ )
A_ = CustomTokenizerFast.from_pretrained(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase__ ):
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase__ ):
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase__ )
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def snake_case_ ( self ) -> Any:
'''simple docstring'''
class A__ ( _snake_case ):
lowercase = False
class A__ ( _snake_case ):
lowercase = NewTokenizer
lowercase = False
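        # The two local stand-in classes above get registered for CustomConfig below; the assertions
        # then read special_attribute_present (False => local class was used, True => remote code was used).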
try:
AutoConfig.register("""custom""" , UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , slow_tokenizer_class=UpperCamelCase__ )
AutoTokenizer.register(UpperCamelCase__ , fast_tokenizer_class=UpperCamelCase__ )
# If remote code is not set, the default is to use local
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
A_ = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=UpperCamelCase__ , use_fast=UpperCamelCase__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
A_ = AutoTokenizer.from_pretrained("""bert-base""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCamelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , revision="""aaaaaa""" )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
A_ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 717 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
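# Shared fixtures: the small Lena test image plus a grayscale copy, used by the filter and edge-detection tests below.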
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
        # The PIL Image repr embeds a memory address, so only the stable prefix is checked
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
    # assert the generated Gaussian kernel has no zero entries
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# laplace diagonals
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
| 667 | 0 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class A__ :
def __init__( self , UpperCamelCase__ = "cpu" , UpperCamelCase__ = "openai/clip-vit-large-patch14" ) -> None:
'''simple docstring'''
A_ = device
A_ = CLIPTokenizerFast.from_pretrained(UpperCamelCase__ )
A_ = [0.48145466, 0.4578275, 0.40821073]
A_ = [0.26862954, 0.26130258, 0.27577711]
A_ = torchvision.transforms.Normalize(self.image_mean , self.image_std )
A_ = torchvision.transforms.Resize(224 )
A_ = torchvision.transforms.CenterCrop(224 )
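    # preprocess_img (below) chains resize -> center crop -> normalize with torchvision ops,
    # keeping the pipeline differentiable so gradients can flow back to the latent.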
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = self.resize(UpperCamelCase__ )
A_ = self.center_crop(UpperCamelCase__ )
A_ = self.normalize(UpperCamelCase__ )
return images
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self.tokenizer(text=UpperCamelCase__ , **UpperCamelCase__ )
A_ = self.preprocess_img(UpperCamelCase__ )
A_ = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=10 , UpperCamelCase__=0.01 , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__="image" , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=False , ) -> None:
'''simple docstring'''
super().__init__()
A_ = None
A_ = device if device else get_device()
if vqgan:
A_ = vqgan
else:
A_ = load_vqgan(self.device , conf_path=UpperCamelCase__ , ckpt_path=UpperCamelCase__ )
self.vqgan.eval()
if clip:
A_ = clip
else:
A_ = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
self.clip.to(self.device )
A_ = ProcessorGradientFlow(device=self.device )
A_ = iterations
A_ = lr
A_ = log
A_ = make_grid
A_ = return_val
A_ = quantize
A_ = self.vqgan.decoder.z_shape
def snake_case_ ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=5 , UpperCamelCase__=True ) -> List[str]:
'''simple docstring'''
A_ = []
if output_path is None:
A_ = """./animation.gif"""
if input_path is None:
A_ = self.save_path
A_ = sorted(glob(input_path + """/*""" ) )
if not len(UpperCamelCase__ ):
raise ValueError(
"""No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
""" function?)""" )
if len(UpperCamelCase__ ) == 1:
print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
A_ = total_duration / len(UpperCamelCase__ )
A_ = [frame_duration] * len(UpperCamelCase__ )
if extend_frames:
A_ = 1.5
A_ = 3
for file_name in paths:
if file_name.endswith(""".png""" ):
images.append(imageio.imread(UpperCamelCase__ ) )
imageio.mimsave(UpperCamelCase__ , UpperCamelCase__ , duration=UpperCamelCase__ )
print(f'''gif saved to {output_path}''' )
def snake_case_ ( self , UpperCamelCase__=None , UpperCamelCase__=None ) -> Optional[int]:
'''simple docstring'''
if not (path or img):
raise ValueError("""Input either path or tensor""" )
if img is not None:
raise NotImplementedError
A_ = preprocess(Image.open(UpperCamelCase__ ) , target_image_size=256 ).to(self.device )
A_ = preprocess_vqgan(UpperCamelCase__ )
A_ , *A_ = self.vqgan.encode(UpperCamelCase__ )
return z
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = self.latent.detach().requires_grad_()
A_ = base_latent + transform_vector
if self.quantize:
A_ , *A_ = self.vqgan.quantize(UpperCamelCase__ )
else:
A_ = trans_latent
return self.vqgan.decode(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.clip_preprocessor(text=UpperCamelCase__ , images=UpperCamelCase__ , return_tensors="""pt""" , padding=UpperCamelCase__ )
A_ = self.clip(**UpperCamelCase__ )
A_ = clip_outputs.logits_per_image
if weights is not None:
A_ = similarity_logits * weights
return similarity_logits.sum()
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self._get_clip_similarity(pos_prompts["""prompts"""] , UpperCamelCase__ , weights=(1 / pos_prompts["""weights"""]) )
if neg_prompts:
A_ = self._get_clip_similarity(neg_prompts["""prompts"""] , UpperCamelCase__ , weights=neg_prompts["""weights"""] )
else:
A_ = torch.tensor([1] , device=self.device )
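        # Loss = -log(pos_similarity) + log(neg_similarity): minimizing it pulls the image toward
        # the positive prompts and away from the negative ones.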
A_ = -torch.log(UpperCamelCase__ ) + torch.log(UpperCamelCase__ )
return loss
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = torch.randn_like(self.latent , requires_grad=UpperCamelCase__ , device=self.device )
A_ = torch.optim.Adam([vector] , lr=self.lr )
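        # Only the random offset vector is optimized; the VQGAN and CLIP weights stay frozen throughout.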
for i in range(self.iterations ):
optim.zero_grad()
A_ = self._add_vector(UpperCamelCase__ )
A_ = loop_post_process(UpperCamelCase__ )
A_ = self._get_CLIP_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
print("""CLIP loss""" , UpperCamelCase__ )
if self.log:
wandb.log({"""CLIP Loss""": clip_loss} )
clip_loss.backward(retain_graph=UpperCamelCase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
wandb.init(reinit=UpperCamelCase__ , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
A_ = Image.open(UpperCamelCase__ )
A_ = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(UpperCamelCase__ ) )
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if not prompts:
return []
A_ = []
A_ = []
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(UpperCamelCase__ , (tuple, list) ):
A_ = prompt[0]
A_ = float(prompt[1] )
elif ":" in prompt:
A_ , A_ = prompt.split(""":""" )
A_ = float(UpperCamelCase__ )
else:
A_ = prompt
A_ = 1.0
processed_prompts.append(UpperCamelCase__ )
weights.append(UpperCamelCase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCamelCase__ , device=self.device ),
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=None , ) -> Tuple:
'''simple docstring'''
if image_path:
A_ = self._get_latent(UpperCamelCase__ )
else:
A_ = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert pos_prompts, "You must provide at least one positive prompt."
A_ = self.process_prompts(UpperCamelCase__ )
A_ = self.process_prompts(UpperCamelCase__ )
if save_final and save_path is None:
A_ = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
else:
A_ = save_path + """_""" + get_timestamp()
os.makedirs(UpperCamelCase__ )
A_ = save_path
A_ = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(UpperCamelCase__ ) )
A_ = loop_post_process(UpperCamelCase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ):
if show_intermediate:
show_pil(UpperCamelCase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(UpperCamelCase__ )} )
if show_final:
show_pil(UpperCamelCase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
| 718 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
if point:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__, (int, float) ):
A_ = (
"""Expected a list of numbers as input, found """
F'''{type(UpperCAmelCase__ ).__name__}'''
)
raise TypeError(UpperCAmelCase__ )
else:
A_ = F'''Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
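# Illustrative check (not from the source): for the points [1, 1] and [2, 2]
# both distance variants return |1 - 2| + |1 - 2| = 2.0.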
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import requests
__lowerCamelCase = '''''' # <-- Put your OpenWeatherMap appid here!
__lowerCamelCase = '''https://api.openweathermap.org/data/2.5/'''
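# Each helper below forwards its own arguments as query parameters via params=locals(),
# so the appid and the location/coordinates reach the API unchanged.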
def UpperCAmelCase__ ( UpperCAmelCase__ = "Chicago", UpperCAmelCase__ = APPID ) -> dict:
return requests.get(URL_BASE + """weather""", params=locals() ).json()
def UpperCAmelCase__ ( UpperCAmelCase__ = "Kolkata, India", UpperCAmelCase__ = APPID ) -> dict:
return requests.get(URL_BASE + """forecast""", params=locals() ).json()
def UpperCAmelCase__ ( UpperCAmelCase__ = 55.68, UpperCAmelCase__ = 12.57, UpperCAmelCase__ = APPID ) -> dict:
return requests.get(URL_BASE + """onecall""", params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__lowerCamelCase = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 0 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class A__ ( _snake_case ):
def __init__( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(UpperCamelCase__ )
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = {}
A_ = {}
A_ = {}
# preprocess args
if "points_per_batch" in kwargs:
A_ = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
A_ = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
A_ = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
A_ = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
A_ = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
A_ = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
A_ = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
A_ = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
A_ = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
A_ = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
A_ = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
A_ = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , UpperCamelCase__ , *UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return super().__call__(UpperCamelCase__ , *UpperCamelCase__ , num_workers=UpperCamelCase__ , batch_size=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=64 , UpperCamelCase__ = 0 , UpperCamelCase__ = 512 / 1500 , UpperCamelCase__ = 32 , UpperCamelCase__ = 1 , ) -> List[Any]:
'''simple docstring'''
A_ = load_image(UpperCamelCase__ )
A_ = self.image_processor.size["""longest_edge"""]
A_ , A_ , A_ , A_ = self.image_processor.generate_crop_boxes(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A_ = self.image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
A_ = self.get_inference_context()
with inference_context():
A_ = self._ensure_tensor_on_device(UpperCamelCase__ , device=self.device )
A_ = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
A_ = image_embeddings
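        # The image embeddings computed above are reused for every batch of point prompts yielded below.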
A_ = grid_points.shape[1]
A_ = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
A_ = grid_points[:, i : i + points_per_batch, :, :]
A_ = input_labels[:, i : i + points_per_batch]
A_ = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0.88 , UpperCamelCase__=0.95 , UpperCamelCase__=0 , UpperCamelCase__=1 , ) -> Optional[Any]:
'''simple docstring'''
A_ = model_inputs.pop("""input_boxes""" )
A_ = model_inputs.pop("""is_last""" )
A_ = model_inputs.pop("""original_sizes""" ).tolist()
A_ = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
A_ = self.model(**UpperCamelCase__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A_ = model_outputs["""pred_masks"""]
A_ = self.image_processor.post_process_masks(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , binarize=UpperCamelCase__ )
A_ = model_outputs["""iou_scores"""]
A_ , A_ , A_ = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=0.7 , ) -> str:
'''simple docstring'''
A_ = []
A_ = []
A_ = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
A_ = torch.cat(UpperCamelCase__ )
A_ = torch.cat(UpperCamelCase__ )
A_ , A_ , A_ , A_ = self.image_processor.post_process_for_mask_generation(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A_ = defaultdict(UpperCamelCase__ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCamelCase__ )
A_ = {}
if output_rle_mask:
A_ = rle_mask
if output_bboxes_mask:
A_ = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 720 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if num < 0:
return False
A_ = num
A_ = 0
while num > 0:
A_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
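# Illustrative examples (not from the source): 121 reverses to 121 -> True,
# 123 reverses to 321 -> False, and negative inputs are rejected up front.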
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class A__ ( _snake_case ):
lowercase = "open-llama"
def __init__( self , UpperCamelCase__=100000 , UpperCamelCase__=4096 , UpperCamelCase__=11008 , UpperCamelCase__=32 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=2048 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-6 , UpperCamelCase__=True , UpperCamelCase__=0 , UpperCamelCase__=1 , UpperCamelCase__=2 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ) -> int:
'''simple docstring'''
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = intermediate_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = initializer_range
A_ = rms_norm_eps
A_ = use_cache
A_ = kwargs.pop(
"""use_memorry_efficient_attention""" , UpperCamelCase__ )
A_ = hidden_dropout_prob
A_ = attention_dropout_prob
A_ = use_stable_embedding
A_ = shared_input_output_embedding
A_ = rope_scaling
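        # rope_scaling, when provided, should be a dict such as {"type": "linear", "factor": 2.0};
        # it is validated below.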
self._rope_scaling_validation()
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__ , )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCamelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
A_ = self.rope_scaling.get("""type""" , UpperCamelCase__ )
A_ = self.rope_scaling.get("""factor""" , UpperCamelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 721 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
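# memo caches previously discovered "jumps" (digit-sum difference, terms skipped, scale k)
# so long stretches of the sequence can be skipped instead of being generated term by term.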
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 700 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 , UpperCamelCase__=False , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
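        # The cutoffs partition the vocabulary into a frequent "head" shortlist plus tail clusters;
        # with div_val > 1 each later cluster gets a smaller embedding size (d_embed // div_val**i).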
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = x
if proj is not None:
A_ = tf.einsum("""ibd,ed->ibe""" , UpperCamelCase__ , UpperCamelCase__ )
return tf.einsum("""ibd,nd->ibn""" , UpperCamelCase__ , UpperCamelCase__ ) + b
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = shape_list(UpperCamelCase__ )
A_ = tf.range(lp_size[0] , dtype=target.dtype )
A_ = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
| 667 | 0 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = '''▁'''
__lowerCamelCase = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__lowerCamelCase = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__lowerCamelCase = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__lowerCamelCase = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__lowerCamelCase = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class A__ ( _snake_case ):
lowercase = ["input_ids"]
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = RESOURCE_FILES_NAMES
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__="utf8" , UpperCamelCase__="[UNK]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="[PAD]" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
'''simple docstring'''
A_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , vocab_file=UpperCamelCase__ , encoding=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
A_ = do_lower_case
A_ = sentencepiece_model_ckpt
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
A_ = self.load_vocab(filepath=UpperCamelCase__ )
else:
A_ = {self.sp_model.id_to_piece(UpperCamelCase__ ): id for id in range(self.sp_model.get_piece_size() )}
A_ = {v: k for k, v in self.vocab.items()}
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if text is None:
return None
A_ = self.tokenize(UpperCamelCase__ )
A_ , A_ = """""", []
for i, ch in enumerate(UpperCamelCase__ ):
if ch in self.SP_CHAR_MAPPING:
A_ = self.SP_CHAR_MAPPING.get(UpperCamelCase__ )
else:
A_ = unicodedata.normalize("""NFKC""" , UpperCamelCase__ )
if self.is_whitespace(UpperCamelCase__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCamelCase__ ) )
A_ , A_ , A_ = normalized_text, [], 0
if self.do_lower_case:
A_ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
A_ = token[1:]
A_ = text[offset:].index(UpperCamelCase__ ) + offset
A_ = start + len(UpperCamelCase__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
A_ = end
return token_mapping
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return len(self.vocab )
def snake_case_ ( self ) -> int:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ) -> List[str]:
'''simple docstring'''
A_ = self.__dict__.copy()
A_ = None
return state
def __setstate__( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A_ = {}
A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase__ , UpperCamelCase__ ) for c in text) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=64 , UpperCamelCase__=0.1 ) -> List[Any]:
'''simple docstring'''
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
A_ = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
A_ = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
A_ = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
A_ = self.sp_model.EncodeAsPieces(UpperCamelCase__ )
else:
A_ = self.sp_model.SampleEncodeAsPieces(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A_ = []
for pi, piece in enumerate(UpperCamelCase__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase__ ) and pi != 0:
new_pieces.append(UpperCamelCase__ )
continue
else:
continue
A_ = 0
for i, chunk in enumerate(UpperCamelCase__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase__ ) or self.is_punct(UpperCamelCase__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase__ )
A_ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A_ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A_ = i
if len(UpperCamelCase__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = """""".join(UpperCamelCase__ ).replace(UpperCamelCase__ , """ """ ).strip()
return out_string
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = self.convert_ids_to_tokens(UpperCamelCase__ )
A_ = """""".join(UpperCamelCase__ ).replace(UpperCamelCase__ , """ """ ).strip()
return out_string
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return self.reverse_vocab.get(UpperCamelCase__ , self.unk_token )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ = [self.cls_token_id]
A_ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
        # called when `add_special_tokens` is True, so this must align with the `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCamelCase__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCamelCase__ ) + 1) + [1] * (len(UpperCamelCase__ ) + 3)
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCamelCase__ ) == 1:
A_ = unicodedata.category(UpperCamelCase__ )
if cat == "Zs":
return True
return False
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = {}
with io.open(UpperCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(UpperCamelCase__ ):
A_ = line.rstrip("""\n""" )
A_ = int(UpperCamelCase__ )
return token_to_idx
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
A_ = 0
if os.path.isdir(UpperCamelCase__ ):
A_ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
A_ = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
A_ = token_index
writer.write(token + """\n""" )
index += 1
A_ = os.path.join(UpperCamelCase__ , """sentencepiece.bpe.model""" )
with open(UpperCamelCase__ , """wb""" ) as fi:
A_ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (vocab_file,)
| 701 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
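    # Relax every edge leaving v for one search direction; when a neighbor was already settled
    # by the opposite search, try to tighten the best known total distance through it.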
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
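# Illustrative expectation (not from the source): in the graphs above the
# shortest E -> F distance is 3, via E -> G -> F.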
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowerCamelCase = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 702 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
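# Illustrative example (not from the source): 1994 -> "MCMXCIV".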
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
with open(os.path.dirname(UpperCAmelCase__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
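    # When did_step is False the two models' gradients are expected to differ (no sync yet);
    # after a synced optimizer step they must be allclose again.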
for param, grad_param in zip(model_a.parameters(), model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=True ) -> Any:
model.train()
A_ = model(UpperCAmelCase__ )
A_ = F.mse_loss(UpperCAmelCase__, target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=False ) -> List[Any]:
set_seed(42 )
A_ = RegressionModel()
A_ = deepcopy(UpperCAmelCase__ )
A_ = RegressionDataset(length=80 )
A_ = DataLoader(UpperCAmelCase__, batch_size=16 )
model.to(accelerator.device )
if sched:
A_ = AdamW(params=model.parameters(), lr=1e-3 )
A_ = AdamW(params=ddp_model.parameters(), lr=1e-3 )
A_ = LambdaLR(UpperCAmelCase__, lr_lambda=lambda UpperCAmelCase__ : epoch**0.65 )
A_ = LambdaLR(UpperCAmelCase__, lr_lambda=lambda UpperCAmelCase__ : epoch**0.65 )
# Make a copy of `model`
if sched:
A_ , A_ , A_ , A_ = accelerator.prepare(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ , A_ = accelerator.prepare(UpperCAmelCase__, UpperCAmelCase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# Test when on a single CPU or GPU that the context manager does nothing
A_ , A_ , A_ = get_training_setup(UpperCAmelCase__ )
# Use a single batch
A_ , A_ = next(iter(UpperCAmelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
A_ , A_ = accelerator.gather((ddp_input, ddp_target) )
A_ , A_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCAmelCase__ ):
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
# Sync grads
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad, ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
A_ = ddp_input[torch.randperm(len(UpperCAmelCase__ ) )]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[str]:
# Test on distributed setup that context manager behaves properly
A_ , A_ , A_ = get_training_setup(UpperCAmelCase__ )
# Use a single batch
A_ , A_ = next(iter(UpperCAmelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
A_ , A_ = accelerator.gather((ddp_input, ddp_target) )
A_ , A_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCAmelCase__ ):
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
# Sync grads
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
A_ = ddp_input[torch.randperm(len(UpperCAmelCase__ ) )]
def UpperCAmelCase__ ( UpperCAmelCase__=False, UpperCAmelCase__=False ) -> int:
A_ = Accelerator(
split_batches=UpperCAmelCase__, dispatch_batches=UpperCAmelCase__, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
A_ , A_ , A_ = get_training_setup(UpperCAmelCase__ )
for iteration, batch in enumerate(UpperCAmelCase__ ):
A_ , A_ = batch.values()
# Gather the distributed inputs and targs for the base model
A_ , A_ = accelerator.gather((ddp_input, ddp_target) )
A_ , A_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
        # Do gradient accumulation under the `accumulate` wrapper
with accelerator.accumulate(UpperCAmelCase__ ):
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
        # DDP model and model should only be in sync on accumulation boundaries or at the end of the dataloader
for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCAmelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad, ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
A_ = ddp_input[torch.randperm(len(UpperCAmelCase__ ) )]
GradientState._reset_state()
def UpperCAmelCase__ ( UpperCAmelCase__=False, UpperCAmelCase__=False ) -> str:
A_ = Accelerator(
split_batches=UpperCAmelCase__, dispatch_batches=UpperCAmelCase__, gradient_accumulation_steps=2 )
# Test that context manager behaves properly
A_ , A_ , A_ , A_ , A_ , A_ , A_ = get_training_setup(UpperCAmelCase__, UpperCAmelCase__ )
for iteration, batch in enumerate(UpperCAmelCase__ ):
A_ , A_ = batch.values()
# Gather the distributed inputs and targs for the base model
A_ , A_ = accelerator.gather((ddp_input, ddp_target) )
A_ , A_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCAmelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCAmelCase__ ):
step_model(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
A_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCAmelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def UpperCAmelCase__ ( ) -> Any:
A_ = Accelerator()
A_ = RegressionDataset(length=80 )
A_ = DataLoader(UpperCAmelCase__, batch_size=16 )
A_ = RegressionDataset(length=96 )
A_ = DataLoader(UpperCAmelCase__, batch_size=16 )
A_ , A_ = accelerator.prepare(UpperCAmelCase__, UpperCAmelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCAmelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase__ )
if iteration < len(UpperCAmelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(UpperCAmelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCAmelCase__ )
if batch_num < len(UpperCAmelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def UpperCAmelCase__ ( ) -> Dict:
A_ = Accelerator()
A_ = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(UpperCAmelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(UpperCAmelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation(UpperCAmelCase__, UpperCAmelCase__ )
    # Currently breaks on torch 2.0+; the cause still needs investigation
if is_torch_version("""<""", """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, """`split_batches=False`, `dispatch_batches=False`**""", )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """, F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
test_gradient_accumulation_with_opt_and_scheduler(UpperCAmelCase__, UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
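# --- Illustrative sketch (not part of the tests above). The pattern these tests
# exercise is `accelerator.accumulate`, which skips gradient synchronization on all
# steps except accumulation boundaries. A minimal training loop built on it, assuming
# a hypothetical `build_training_objects` helper that returns a model, an optimizer
# and a dataloader of {"x": ..., "y": ...} batches:
def _example_accumulation_loop():
    import torch.nn.functional as F
    from accelerate import Accelerator

    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = build_training_objects()  # hypothetical helper
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        inputs, targets = batch.values()
        with accelerator.accumulate(model):
            # gradients are only all-reduced once the accumulation boundary is reached
            loss = F.mse_loss(model(inputs), targets)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()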
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
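# The replacement the warning points at is the direct import:
#     from diffusers import StableDiffusionImg2ImgPipeline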
'''simple docstring'''
import itertools
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCAmelCase__ ( ) -> Optional[Any]:
A_ = 2
while True:
if is_prime(UpperCAmelCase__ ):
yield num
num += 1
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_01 ) -> int:
return next(itertools.islice(prime_generator(), nth - 1, UpperCAmelCase__ ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
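# Sanity check (Project Euler problem 7): the 10001st prime is 104743, so with the
# original, un-obfuscated names this amounts to `assert solution() == 104743`.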
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
            A_ = f'''{safetensors.__version__} but is ignored because the PyTorch version is too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
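# This class backs the `env` subcommand of the transformers CLI; a typical invocation is
#     transformers-cli env
# which prints the version table above for copy-pasting into bug reports.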
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (EulerDiscreteScheduler,)
lowercase = 10
def snake_case_ ( self , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self ) -> str:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
def snake_case_ ( self ) -> int:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCamelCase__ ) )
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(prediction_type="""v_prediction""" )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCamelCase__ ) )
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
A_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCamelCase__ ) )
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ , use_karras_sigmas=UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
A_ = torch.manual_seed(0 )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
A_ = sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
A_ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCamelCase__ ) )
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
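# --- Illustrative sketch (not part of the tests above). Outside of unit tests, the
# scheduler is usually swapped into a pipeline via `from_config`; the model id below is
# only for illustration:
#
#     from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
#
#     pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)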
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaImgaImgPipeline
lowercase = ["image_embeds", "negative_image_embeds", "image"]
lowercase = [
"image_embeds",
"negative_image_embeds",
"image",
]
lowercase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
A_ = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.dummy_unet
A_ = self.dummy_movq
A_ = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A_ = DDIMScheduler(**UpperCamelCase__ )
A_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> List[str]:
'''simple docstring'''
A_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
# create init_image
A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.images
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -3:, -3:, -1]
A_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
A_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A_ = """A red cartoon frog, 4k"""
A_ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
A_ = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
A_ = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ , A_ = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
A_ = pipeline(
image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
A_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
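# Example (using the original, un-obfuscated names `bidirectional_dij`, `graph_fwd` and
# `graph_bwd`): the shortest E -> F distance in the graphs above is 3, via E -> G -> F
# with weights 2 + 1, so
#     bidirectional_dij("E", "F", graph_fwd, graph_bwd)  # -> 3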
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
A_ = expert_weihts[idx]
                print(F'''{key} -> {key.replace("expert/", f"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
        '''Path to the T5X SwitchTransformers checkpoint to convert. If a `config_name` is not provided, a'''
        ''' `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
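# Typical invocation sketch for the conversion script above (the script name and paths
# are placeholders):
#     python convert_switch_transformers_checkpoint.py \
#         --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --gin_file /path/to/operative_config.gin \
#         --pytorch_dump_folder_path /path/to/output \
#         --num_experts 8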
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , ) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(transformer=UpperCamelCase__ , vae=UpperCamelCase__ , scheduler=UpperCamelCase__ )
# create a imagenet -> id dictionary for easier use
A_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
A_ = int(UpperCamelCase__ )
A_ = dict(sorted(self.labels.items() ) )
def snake_case_ ( self , UpperCamelCase__ ) -> List[int]:
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = list(UpperCamelCase__ )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , UpperCamelCase__ , UpperCamelCase__ = 4.0 , UpperCamelCase__ = None , UpperCamelCase__ = 50 , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
A_ = len(UpperCamelCase__ )
A_ = self.transformer.config.sample_size
A_ = self.transformer.config.in_channels
A_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=UpperCamelCase__ , device=self.device , dtype=self.transformer.dtype , )
A_ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
A_ = torch.tensor(UpperCamelCase__ , device=self.device ).reshape(-1 )
A_ = torch.tensor([1000] * batch_size , device=self.device )
A_ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(UpperCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
A_ = latent_model_input[: len(UpperCamelCase__ ) // 2]
A_ = torch.cat([half, half] , dim=0 )
A_ = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
A_ = t
if not torch.is_tensor(UpperCamelCase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
A_ = latent_model_input.device.type == """mps"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = torch.floataa if is_mps else torch.floataa
else:
A_ = torch.intaa if is_mps else torch.intaa
A_ = torch.tensor([timesteps] , dtype=UpperCamelCase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
A_ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
A_ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
A_ = self.transformer(
UpperCamelCase__ , timestep=UpperCamelCase__ , class_labels=UpperCamelCase__ ).sample
# perform guidance
if guidance_scale > 1:
A_ , A_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
A_ , A_ = torch.split(UpperCamelCase__ , len(UpperCamelCase__ ) // 2 , dim=0 )
A_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
A_ = torch.cat([half_eps, half_eps] , dim=0 )
A_ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
A_ , A_ = torch.split(UpperCamelCase__ , UpperCamelCase__ , dim=1 )
else:
A_ = noise_pred
# compute previous image: x_t -> x_t-1
A_ = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
if guidance_scale > 1:
A_ , A_ = latent_model_input.chunk(2 , dim=0 )
else:
A_ = latent_model_input
A_ = 1 / self.vae.config.scaling_factor * latents
A_ = self.vae.decode(UpperCamelCase__ ).sample
A_ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=UpperCamelCase__ )
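# --- Illustrative usage sketch (not part of the pipeline file above). In diffusers this
# pipeline is published as `DiTPipeline`; the model id below is only for illustration:
#
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     class_ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]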
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
    ), F'''number_of_steps needs to be a positive integer, your input was {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
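# Example: a staircase of 4 steps can be climbed in 5 distinct ways
# (1+1+1+1, 1+1+2, 1+2+1, 2+1+1 and 2+2), so the function returns 5 for an input of 4.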
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
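# Sanity check (Project Euler problem 46): the smallest odd composite that cannot be
# written as the sum of a prime and twice a square is 5777, so `solution()` is expected
# to return 5777.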
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
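# Sanity check (Project Euler problem 55): 249 numbers below ten thousand are Lychrel
# candidates under the 50-iteration rule used here, so `solution()` is expected to
# return 249.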
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
for attribute in key.split(""".""" ):
A_ = getattr(UpperCAmelCase__, UpperCAmelCase__ )
if weight_type is not None:
A_ = getattr(UpperCAmelCase__, UpperCAmelCase__ ).shape
else:
A_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ = value
elif weight_type == "weight_g":
A_ = value
elif weight_type == "weight_v":
A_ = value
elif weight_type == "bias":
A_ = value
else:
A_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
A_ = []
A_ = fairseq_model.state_dict()
A_ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, hf_model.config.feat_extract_norm == """group""", )
A_ = True
else:
for key, mapped_key in MAPPING.items():
A_ = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
A_ = True
if "*" in mapped_key:
A_ = name.split(UpperCAmelCase__ )[0].split(""".""" )[-2]
A_ = mapped_key.replace("""*""", UpperCAmelCase__ )
if "weight_g" in name:
A_ = """weight_g"""
elif "weight_v" in name:
A_ = """weight_v"""
elif "weight" in name:
A_ = """weight"""
elif "bias" in name:
A_ = """bias"""
else:
A_ = None
set_recursively(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Any:
A_ = full_name.split("""conv_layers.""" )[-1]
A_ = name.split(""".""" )
A_ = int(items[0] )
A_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__=None, UpperCAmelCase__=True ) -> List[Any]:
if config_path is not None:
A_ = HubertConfig.from_pretrained(UpperCAmelCase__ )
else:
A_ = HubertConfig()
if is_finetuned:
if dict_path:
A_ = Dictionary.load(UpperCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A_ = target_dict.pad_index
A_ = target_dict.bos_index
A_ = target_dict.eos_index
A_ = len(target_dict.symbols )
A_ = os.path.join(UpperCAmelCase__, """vocab.json""" )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCAmelCase__ ) )
return
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
with open(UpperCAmelCase__, """w""", encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices, UpperCAmelCase__ )
A_ = WavaVecaCTCTokenizer(
UpperCAmelCase__, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=UpperCAmelCase__, )
A_ = True if config.feat_extract_norm == """layer""" else False
A_ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_60_00, padding_value=0, do_normalize=UpperCAmelCase__, return_attention_mask=UpperCAmelCase__, )
A_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase__, tokenizer=UpperCAmelCase__ )
processor.save_pretrained(UpperCAmelCase__ )
A_ = HubertForCTC(UpperCAmelCase__ )
else:
A_ = HubertModel(UpperCAmelCase__ )
if is_finetuned:
A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
A_ , A_ , A_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A_ = model[0].eval()
recursively_load_weights(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__lowerCamelCase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
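# Example invocation (script name and paths are hypothetical):
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned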
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
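# For example (a sketch): ord("中") is 0x4E2D, inside the main CJK block, so
# _is_chinese_char(ord("中")) returns True, while _is_chinese_char(ord("a")) returns False.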
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
    # return 1 only if every character in the word is a CJK character (e.g. '身高' or '神'); words like '180' return 0
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
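# Worked example (a sketch): with bert_tokens = ["我", "喜", "欢"] and
# chinese_word_set = {"喜欢"}, the scan finds "喜欢" starting at index 1 and marks its
# continuation, returning ["我", "喜", "##欢"].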
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
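# Illustration (a sketch): if add_sub_symbol yields ["[CLS]", "喜", "##欢", "[SEP]"],
# the ref ids for that line are [2] -- the position of the "##"-prefixed Chinese
# subword that whole-word masking must mask together with the word's first subword.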
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
    # For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=_snake_case ):
lowercase = ["keras_nlp"]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""keras_nlp"""] )
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
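# Worked check (a sketch of the conjecture under test): small odd composites all
# decompose as prime + twice a square, e.g. 9 = 7 + 2*1**2, 15 = 13 + 2*1**2,
# 33 = 31 + 2*1**2; the search above collects the first n odd composites admitting
# no such decomposition.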
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
lowercase = ["pixel_values"]
def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 0.9 , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = 1 / 255 , UpperCamelCase__ = True , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = size if size is not None else {"""shortest_edge""": 224}
A_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
A_ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" )
A_ = do_resize
A_ = size
A_ = crop_pct
A_ = resample
A_ = do_center_crop
A_ = crop_size
A_ = do_rescale
A_ = rescale_factor
A_ = do_normalize
A_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
A_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
A_ = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
A_ = int(size["""height"""] / crop_pct )
else:
A_ = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(UpperCamelCase__ ) )
A_ = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ )
else:
if "shortest_edge" in size:
A_ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ )
elif "height" in size and "width" in size:
A_ = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(UpperCamelCase__ ) )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
A_ = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ) -> PIL.Image.Image:
'''simple docstring'''
A_ = do_resize if do_resize is not None else self.do_resize
A_ = crop_pct if crop_pct is not None else self.crop_pct
A_ = resample if resample is not None else self.resample
A_ = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ = do_rescale if do_rescale is not None else self.do_rescale
A_ = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ = do_normalize if do_normalize is not None else self.do_normalize
A_ = image_mean if image_mean is not None else self.image_mean
A_ = image_std if image_std is not None else self.image_std
A_ = size if size is not None else self.size
A_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
A_ = crop_size if crop_size is not None else self.crop_size
A_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" )
A_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
A_ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , crop_pct=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
A_ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
A_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
A_ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
A_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
A_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
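# Usage sketch (class and method names are obfuscated above; values assume the
# defaults): preprocessing a 256x256 uint8 RGB image resizes its shortest edge to
# int(224 / 0.9) = 248, center-crops to 224x224, rescales by 1/255 and normalizes,
# returning pixel_values of shape (1, 3, 224, 224).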
| 712 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0, UpperCAmelCase__ = 0 ) -> int:
A_ = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
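# Example (a sketch of the intended behaviour): search([1, 5, 7, 9], 7) compares both
# ends, misses, then recurses on indices (1, 2) and returns 2; an absent key
# eventually yields -1.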
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class A__ ( _snake_case ):
lowercase = 42
lowercase = jnp.floataa
lowercase = True
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
super().setup()
A_ = nn.Dense(5 , dtype=self.dtype )
def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
A_ = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class A__ ( _snake_case ):
lowercase = FlaxBigBirdForNaturalQuestionsModule
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
def cross_entropy(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None ):
A_ = logits.shape[-1]
A_ = (labels[..., None] == jnp.arange(UpperCAmelCase__ )[None]).astype("""f4""" )
A_ = jax.nn.log_softmax(UpperCAmelCase__, axis=-1 )
A_ = -jnp.sum(labels * logits, axis=-1 )
if reduction is not None:
A_ = reduction(UpperCAmelCase__ )
return loss
A_ = partial(UpperCAmelCase__, reduction=jnp.mean )
A_ = cross_entropy(UpperCAmelCase__, UpperCAmelCase__ )
A_ = cross_entropy(UpperCAmelCase__, UpperCAmelCase__ )
A_ = cross_entropy(UpperCAmelCase__, UpperCAmelCase__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class A__ :
lowercase = "google/bigbird-roberta-base"
lowercase = 3_000
lowercase = 10_500
lowercase = 128
lowercase = 3
lowercase = 1
lowercase = 5
# tx_args
lowercase = 3e-5
lowercase = 0.0
lowercase = 20_000
lowercase = 0.0095
lowercase = "bigbird-roberta-natural-questions"
lowercase = "training-expt"
lowercase = "data/nq-training.jsonl"
lowercase = "data/nq-validation.jsonl"
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=UpperCamelCase__ )
A_ = os.path.join(self.base_dir , self.save_dir )
A_ = self.batch_size_per_device * jax.device_count()
@dataclass
class A__ :
lowercase = 42
lowercase = 4_096 # no dynamic padding on TPUs
def __call__( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = self.collate_fn(UpperCamelCase__ )
A_ = jax.tree_util.tree_map(UpperCamelCase__ , UpperCamelCase__ )
return batch
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ , A_ = self.fetch_inputs(features["""input_ids"""] )
A_ = {
"""input_ids""": jnp.array(UpperCamelCase__ , dtype=jnp.intaa ),
"""attention_mask""": jnp.array(UpperCamelCase__ , dtype=jnp.intaa ),
"""start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ),
"""end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ),
"""pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ),
}
return batch
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = [self._fetch_inputs(UpperCamelCase__ ) for ids in input_ids]
return zip(*UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = [1 for _ in range(len(UpperCamelCase__ ) )]
while len(UpperCamelCase__ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
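# Padding illustration (a sketch, assuming pad_id = 0): with max_length = 4,
# input_ids [101, 102] become [101, 102, 0, 0] with attention_mask [1, 1, 0, 0].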
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None ) -> int:
if seed is not None:
A_ = dataset.shuffle(seed=UpperCAmelCase__ )
for i in range(len(UpperCAmelCase__ ) // batch_size ):
A_ = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(UpperCAmelCase__ )
@partial(jax.pmap, axis_name="""batch""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, **UpperCAmelCase__ ) -> Any:
def loss_fn(UpperCAmelCase__ ):
A_ = model_inputs.pop("""start_labels""" )
A_ = model_inputs.pop("""end_labels""" )
A_ = model_inputs.pop("""pooled_labels""" )
A_ = state.apply_fn(**UpperCAmelCase__, params=UpperCAmelCase__, dropout_rng=UpperCAmelCase__, train=UpperCAmelCase__ )
A_ , A_ , A_ = outputs
return state.loss_fn(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ , A_ = jax.random.split(UpperCAmelCase__ )
A_ = jax.value_and_grad(UpperCAmelCase__ )
A_ , A_ = grad_fn(state.params )
A_ = jax.lax.pmean({"""loss""": loss}, axis_name="""batch""" )
A_ = jax.lax.pmean(UpperCAmelCase__, """batch""" )
A_ = state.apply_gradients(grads=UpperCAmelCase__ )
return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="""batch""" )
def UpperCAmelCase__ ( UpperCAmelCase__, **UpperCAmelCase__ ) -> List[str]:
A_ = model_inputs.pop("""start_labels""" )
A_ = model_inputs.pop("""end_labels""" )
A_ = model_inputs.pop("""pooled_labels""" )
A_ = state.apply_fn(**UpperCAmelCase__, params=state.params, train=UpperCAmelCase__ )
A_ , A_ , A_ = outputs
A_ = state.loss_fn(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A_ = jax.lax.pmean({"""loss""": loss}, axis_name="""batch""" )
return metrics
class A__ ( train_state.TrainState ):
lowercase = struct.field(pytree_node=_snake_case )
@dataclass
class A__ :
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = None
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> int:
'''simple docstring'''
A_ = model.params
A_ = TrainState.create(
apply_fn=model.__call__ , params=UpperCamelCase__ , tx=UpperCamelCase__ , loss_fn=UpperCamelCase__ , )
if ckpt_dir is not None:
A_ , A_ , A_ , A_ , A_ = restore_checkpoint(UpperCamelCase__ , UpperCamelCase__ )
A_ = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
A_ , A_ = build_tx(**UpperCamelCase__ )
A_ = train_state.TrainState(
step=UpperCamelCase__ , apply_fn=model.__call__ , params=UpperCamelCase__ , tx=UpperCamelCase__ , opt_state=UpperCamelCase__ , )
A_ = args
A_ = data_collator
A_ = lr
A_ = params
A_ = jax_utils.replicate(UpperCamelCase__ )
return state
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = self.args
A_ = len(UpperCamelCase__ ) // args.batch_size
A_ = jax.random.PRNGKey(0 )
A_ = jax.random.split(UpperCamelCase__ , jax.device_count() )
for epoch in range(args.max_epochs ):
A_ = jnp.array(0 , dtype=jnp.floataa )
A_ = get_batched_dataset(UpperCamelCase__ , args.batch_size , seed=UpperCamelCase__ )
A_ = 0
for batch in tqdm(UpperCamelCase__ , total=UpperCamelCase__ , desc=f'''Running EPOCH-{epoch}''' ):
A_ = self.data_collator(UpperCamelCase__ )
A_ , A_ , A_ = self.train_step_fn(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
A_ = jax_utils.unreplicate(state.step )
A_ = running_loss.item() / i
A_ = self.scheduler_fn(state_step - 1 )
A_ = self.evaluate(UpperCamelCase__ , UpperCamelCase__ )
A_ = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(UpperCamelCase__ ) )
self.logger.log(UpperCamelCase__ , commit=UpperCamelCase__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = get_batched_dataset(UpperCamelCase__ , self.args.batch_size )
A_ = len(UpperCamelCase__ ) // self.args.batch_size
A_ = jnp.array(0 , dtype=jnp.floataa )
A_ = 0
for batch in tqdm(UpperCamelCase__ , total=UpperCamelCase__ , desc="""Evaluating ... """ ):
A_ = self.data_collator(UpperCamelCase__ )
A_ = self.val_step_fn(UpperCamelCase__ , **UpperCamelCase__ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = jax_utils.unreplicate(UpperCamelCase__ )
print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... """ )
self.model_save_fn(UpperCamelCase__ , params=state.params )
with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(UpperCamelCase__ , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) )
with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , UpperCamelCase__ )
print("""DONE""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> str:
print(F'''RESTORING CHECKPOINT FROM {save_dir}''', end=""" ... """ )
with open(os.path.join(UpperCAmelCase__, """flax_model.msgpack""" ), """rb""" ) as f:
A_ = from_bytes(state.params, f.read() )
with open(os.path.join(UpperCAmelCase__, """opt_state.msgpack""" ), """rb""" ) as f:
A_ = from_bytes(state.opt_state, f.read() )
A_ = joblib.load(os.path.join(UpperCAmelCase__, """args.joblib""" ) )
A_ = joblib.load(os.path.join(UpperCAmelCase__, """data_collator.joblib""" ) )
with open(os.path.join(UpperCAmelCase__, """training_state.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = training_state["""step"""]
print("""DONE""" )
return params, opt_state, step, args, data_collator
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = num_train_steps - warmup_steps
A_ = optax.linear_schedule(init_value=UpperCAmelCase__, end_value=UpperCAmelCase__, transition_steps=UpperCAmelCase__ )
A_ = optax.linear_schedule(init_value=UpperCAmelCase__, end_value=1e-7, transition_steps=UpperCAmelCase__ )
A_ = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps] )
return lr
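# Schedule sketch: the learning rate climbs linearly from init_lr to lr over
# warmup_steps, then decays linearly from lr towards 1e-7 over the remaining
# num_train_steps - warmup_steps steps, joined at the warmup boundary.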
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
def weight_decay_mask(UpperCAmelCase__ ):
A_ = traverse_util.flatten_dict(UpperCAmelCase__ )
A_ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(UpperCAmelCase__ )
A_ = scheduler_fn(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A_ = optax.adamw(learning_rate=UpperCAmelCase__, weight_decay=UpperCAmelCase__, mask=UpperCAmelCase__ )
return tx, lr
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
A_ = time.time()
locka.acquire(UpperCAmelCase__ )
assert time.time() - _start > timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = """a""" * 10_00 + """.lock"""
A_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
A_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
locka.acquire(0 )
| 667 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__lowerCamelCase = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def snake_case_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
A_ = TOKEN
HfFolder.save_token(UpperCamelCase__ )
@classmethod
def snake_case_ ( cls ) -> Tuple:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(UpperCamelCase__ )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase__ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCamelCase__ , repo_id="""test-model-flax""" , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase__ , 1e-3 , msg=f'''{key} not identical''' )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ = FlaxBertModel(UpperCamelCase__ )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase__ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
UpperCamelCase__ , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=UpperCamelCase__ , use_auth_token=self._token )
A_ = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A_ = flatten_dict(unfreeze(model.params ) )
A_ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase__ , 1e-3 , msg=f'''{key} not identical''' )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = True
A_ = flatten_dict(modela.params )
A_ = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
A_ = False
return models_are_equal
@require_flax
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A_ = FlaxBertModel(UpperCamelCase__ )
A_ = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
with self.assertRaises(UpperCamelCase__ ):
A_ = FlaxBertModel.from_pretrained(UpperCamelCase__ )
A_ = FlaxBertModel.from_pretrained(UpperCamelCase__ , subfolder=UpperCamelCase__ )
self.assertTrue(check_models_equal(UpperCamelCase__ , UpperCamelCase__ ) )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A_ = FlaxBertModel(UpperCamelCase__ )
A_ = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , max_shard_size="""10KB""" )
with self.assertRaises(UpperCamelCase__ ):
A_ = FlaxBertModel.from_pretrained(UpperCamelCase__ )
A_ = FlaxBertModel.from_pretrained(UpperCamelCase__ , subfolder=UpperCamelCase__ )
self.assertTrue(check_models_equal(UpperCamelCase__ , UpperCamelCase__ ) )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = """bert"""
A_ = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(UpperCamelCase__ ):
A_ = FlaxBertModel.from_pretrained(UpperCamelCase__ )
A_ = FlaxBertModel.from_pretrained(UpperCamelCase__ , subfolder=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = """bert"""
A_ = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(UpperCamelCase__ ):
A_ = FlaxBertModel.from_pretrained(UpperCamelCase__ )
A_ = FlaxBertModel.from_pretrained(UpperCamelCase__ , subfolder=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
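# Shape sketch (hypothetical channels, double_z=True): with
# block_out_channels=(64, 128, 256, 512) an input of (B, 3, 256, 256) is halved by
# each of the three non-final down blocks to (B, 512, 32, 32), then projected by
# conv_out to (B, 2 * out_channels, 32, 32) -- mean and logvar channels for a VAE.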
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
        # make sure the sample is on the same device as the parameters and has the same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
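# Usage sketch (hypothetical shapes): given encoder moments of shape (B, 2 * C, H, W),
# the distribution chunks them into mean/logvar along dim 1; sample() draws
# z = mean + std * eps with eps ~ N(0, I), and kl() with no argument returns the
# closed-form KL divergence to a standard normal, summed over (C, H, W).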
| 667 | 0 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
    # 1. In HF T5, we have block.{x}.layer.{y}, which corresponds to layer.{x} in
    # the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
A_ = expert_weihts[idx]
            print(F'''{key} -> {key.replace("expert/", F"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
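# Worked example (a sketch): "encoder/layers_0/attention/key/kernel" first becomes
# "encoder/block/0/layer/attention/key/kernel" via the layers_(\d+) rewrite, then the
# classic mappings turn "/attention/" into "/0/SelfAttention/" and "key" into "k",
# yielding "encoder/block/0/layer/0/SelfAttention/k/kernel".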
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 715 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> dict:
A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    A_ = [json.loads(line ) for line in open(UpperCAmelCase__ )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
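        # each alias is keyed as "<language>:<entity name>"; special tokens keep their plain names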
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
    help='''Path to an entity_vocab.jsonl file (one JSON object per line), containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
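# Example invocation (hypothetical local paths; flags match the parser above):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base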
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
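    # A(n): the smallest k for which the repunit R(k) = (10**k - 1) // 9 is
    # divisible by n; it exists only when gcd(n, 10) == 1, hence the early return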
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
A_ = 1
A_ = 1
while repunit:
A_ = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
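# e.g. least_divisible_repunit(7) == 6, since R(6) = 111_111 = 7 * 15_873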
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00_00 ) -> int:
A_ = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(UpperCAmelCase__ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 716 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
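            # attach the extracted audio features to the text encoding under `input_features`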
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
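# Usage sketch (assuming a CLAP checkpoint such as "laion/clap-htsat-unfused"):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt")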
| 667 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''nielsr/canine-s''': 2048,
}
# Unicode defines 1,114,112 total "codepoints"
__lowerCamelCase = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
__lowerCamelCase = 0
__lowerCamelCase = 0xE000
__lowerCamelCase = 0xE001
__lowerCamelCase = 0xE002
__lowerCamelCase = 0xE003
__lowerCamelCase = 0xE004
# Maps special codepoints to human-readable names.
__lowerCamelCase = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: '''[CLS]''',
SEP: '''[SEP]''',
BOS: '''[BOS]''',
MASK: '''[MASK]''',
PAD: '''[PAD]''',
RESERVED: '''[RESERVED]''',
}
# Maps special codepoint human-readable names to their codepoint values.
__lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class A__ ( _snake_case ):
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , UpperCamelCase__=chr(CLS ) , UpperCamelCase__=chr(SEP ) , UpperCamelCase__=chr(SEP ) , UpperCamelCase__=chr(CLS ) , UpperCamelCase__=chr(PAD ) , UpperCamelCase__=chr(MASK ) , UpperCamelCase__=False , UpperCamelCase__=2048 , **UpperCamelCase__ , ) -> None:
'''simple docstring'''
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , model_max_length=UpperCamelCase__ , **UpperCamelCase__ , )
# Creates a mapping for looking up the IDs of special symbols.
A_ = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
A_ = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
A_ = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
A_ = UNICODE_VOCAB_SIZE
A_ = len(self._special_codepoints )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self._unicode_vocab_size
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return list(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
try:
return ord(UpperCamelCase__ )
except TypeError:
raise ValueError(f'''invalid token: \'{token}\'''' )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(UpperCamelCase__ )
except TypeError:
raise ValueError(f'''invalid id: {index}''' )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
return "".join(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
A_ = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
A_ = [1] + ([0] * len(UpperCamelCase__ )) + [1]
if token_ids_a is not None:
result += ([0] * len(UpperCamelCase__ )) + [1]
return result
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
A_ = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple:
'''simple docstring'''
return ()
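# Usage sketch: CANINE tokenizes raw text at the character level, so input ids
# are Unicode code points framed by the [CLS]/[SEP] pseudo-characters, e.g.
#   tokenizer("hi").input_ids  ->  [0xE000, 104, 105, 0xE001]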
| 717 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
    # Laplacian kernel including the diagonal neighbours (weights sum to zero)
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test for get_neighbors_pixel function() return not None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
| 667 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[str]:
A_ = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Union[str, Any]:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A_ = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
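        # the fused qkv projection is split row-wise into three equal chunks:
        # [0, hidden) -> query, [hidden, 2 * hidden) -> key, the remainder -> value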
A_ = in_proj_weight[
: encoder_config.hidden_size, :
]
A_ = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A_ = in_proj_weight[
-encoder_config.hidden_size :, :
]
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
A_ = dct.pop(UpperCAmelCase__ )
A_ = val
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
if "handwritten" in checkpoint_url:
A_ = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
A_ = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
A_ = ViTConfig(image_size=3_84, qkv_bias=UpperCAmelCase__ )
A_ = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A_ = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
A_ = 10_24
A_ = 40_96
A_ = 24
A_ = 16
A_ = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A_ = False
A_ = """relu"""
A_ = 10_24
A_ = True
A_ = False
A_ = False
# load HuggingFace model
A_ = ViTModel(UpperCAmelCase__, add_pooling_layer=UpperCAmelCase__ )
A_ = TrOCRForCausalLM(UpperCAmelCase__ )
A_ = VisionEncoderDecoderModel(encoder=UpperCAmelCase__, decoder=UpperCAmelCase__ )
model.eval()
# load state_dict of original model, rename some keys
A_ = torch.hub.load_state_dict_from_url(UpperCAmelCase__, map_location="""cpu""", check_hash=UpperCAmelCase__ )["""model"""]
A_ = create_rename_keys(UpperCAmelCase__, UpperCAmelCase__ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
read_in_q_k_v(UpperCAmelCase__, UpperCAmelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A_ = state_dict.pop(UpperCAmelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
A_ = val
else:
A_ = val
# load state dict
model.load_state_dict(UpperCAmelCase__ )
# Check outputs on an image
A_ = ViTImageProcessor(size=encoder_config.image_size )
A_ = RobertaTokenizer.from_pretrained("""roberta-large""" )
A_ = TrOCRProcessor(UpperCAmelCase__, UpperCAmelCase__ )
A_ = processor(images=prepare_img(UpperCAmelCase__ ), return_tensors="""pt""" ).pixel_values
# verify logits
A_ = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A_ = model(pixel_values=UpperCAmelCase__, decoder_input_ids=UpperCAmelCase__ )
A_ = outputs.logits
A_ = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
A_ = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
A_ = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
A_ = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10], UpperCAmelCase__, atol=1e-3 ), "First elements of logits not as expected"
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase__ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__lowerCamelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 718 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
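# e.g. for points [1, 1] and [2, 2] the distance is |1 - 2| + |1 - 2| == 2.0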
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
if point:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__, (int, float) ):
A_ = (
"""Expected a list of numbers as input, found """
F'''{type(UpperCAmelCase__ ).__name__}'''
)
raise TypeError(UpperCAmelCase__ )
else:
A_ = F'''Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
__lowerCamelCase = get_logger(__name__)
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , UpperCamelCase__ , getattr(UpperCamelCase__ , UpperCamelCase__ ) )
A_ = module._original_module if isinstance(UpperCamelCase__ , _PatchedModuleObj ) else module
class A__ :
lowercase = []
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> str:
'''simple docstring'''
A_ = obj
A_ = target
A_ = new
A_ = target.split(""".""" )[0]
A_ = {}
A_ = attrs or []
def __enter__( self ) -> List[Any]:
'''simple docstring'''
*A_ , A_ = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(UpperCamelCase__ ) ):
try:
A_ = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
A_ = getattr(self.obj , UpperCamelCase__ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(UpperCamelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
A_ = obj_attr
# patch at top level
setattr(self.obj , UpperCamelCase__ , _PatchedModuleObj(UpperCamelCase__ , attrs=self.attrs ) )
A_ = getattr(self.obj , UpperCamelCase__ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(UpperCamelCase__ , UpperCamelCase__ , _PatchedModuleObj(getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , attrs=self.attrs ) )
A_ = getattr(UpperCamelCase__ , UpperCamelCase__ )
# finally set the target attribute
setattr(UpperCamelCase__ , UpperCamelCase__ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
A_ = getattr(import_module(""".""".join(UpperCamelCase__ ) ) , UpperCamelCase__ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , UpperCamelCase__ ) is attr_value:
A_ = getattr(self.obj , UpperCamelCase__ )
setattr(self.obj , UpperCamelCase__ , self.new )
            elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
A_ = globals()["""__builtins__"""][target_attr]
setattr(self.obj , UpperCamelCase__ , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self , *UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
for attr in list(self.original ):
setattr(self.obj , UpperCamelCase__ , self.original.pop(UpperCamelCase__ ) )
def snake_case_ ( self ) -> str:
'''simple docstring'''
self.__enter__()
self._active_patches.append(self )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
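# Usage sketch (assuming the context manager above is exposed as `patch_submodule`):
#   with patch_submodule(some_module, "os.path.join", mock_join):
#       ...  # inside the block, some_module sees the patched os.path.join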
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
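# Project Euler 55: a Lychrel number never forms a palindrome under repeated
# reverse-and-add; the problem assumes 50 iterations as the cutoff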
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 720 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if num < 0:
return False
A_ = num
A_ = 0
while num > 0:
A_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> bool:
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
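# Project Euler 551 (sum of digits sequence): a(0) = 1 and
# a(n) = a(n - 1) + digit_sum(a(n - 1)); compute a(10**15)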
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> str:
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ), end="""\t""" )
else:
print("""INF""", end="""\t""" )
print()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
A_ = [[float("""inf""" ) for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
for j in range(UpperCAmelCase__ ):
A_ = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(UpperCAmelCase__ ):
# looping through rows of graph array
for i in range(UpperCAmelCase__ ):
# looping through columns of graph array
for j in range(UpperCAmelCase__ ):
if (
dist[i][k] != float("""inf""" )
and dist[k][j] != float("""inf""" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
A_ = dist[i][k] + dist[k][j]
_print_dist(UpperCAmelCase__, UpperCAmelCase__ )
return dist, v
if __name__ == "__main__":
__lowerCamelCase = int(input('''Enter number of vertices: '''))
__lowerCamelCase = int(input('''Enter number of edges: '''))
__lowerCamelCase = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
__lowerCamelCase = 0.0
# src and dst are vertex indices that must be within range(v);
# out-of-range indices will raise an IndexError
for i in range(e):
print('''\nEdge ''', i + 1)
__lowerCamelCase = int(input('''Enter source:'''))
__lowerCamelCase = int(input('''Enter destination:'''))
__lowerCamelCase = float(input('''Enter weight:'''))
__lowerCamelCase = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 700 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 , UpperCamelCase__=False , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
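        # the head softmax covers the shortlist tokens plus one entry per tail cluster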
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = x
if proj is not None:
A_ = tf.einsum("""ibd,ed->ibe""" , UpperCamelCase__ , UpperCamelCase__ )
return tf.einsum("""ibd,nd->ibn""" , UpperCamelCase__ , UpperCamelCase__ ) + b
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = shape_list(UpperCamelCase__ )
A_ = tf.range(lp_size[0] , dtype=target.dtype )
A_ = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
        # Log the loss as a metric (we could log arbitrary metrics,
        # including different metrics for training and inference).
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
| 667 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
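        # fabricated residual history so the multistep scheduler has past model outputs to consume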
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 701 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
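# Bidirectional Dijkstra: run a forward search from `source` and a backward
# search from `destination`, stopping once the two frontier costs together
# can no longer improve on the best meeting point found so far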
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A__ ( _snake_case ):
lowercase = 42
lowercase = 42
class A__ ( nn.Module ):
lowercase = 42
lowercase = (16, 32, 96, 256)
lowercase = jnp.floataa
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A_ = []
for i in range(len(self.block_out_channels ) - 1 ):
A_ = self.block_out_channels[i]
A_ = self.block_out_channels[i + 1]
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCamelCase__ )
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(UpperCamelCase__ )
A_ = blocks
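        # zero-initialized output projection, so the conditioning branch starts as
        # a no-op and is learned gradually (the standard ControlNet initialization)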
A_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = self.conv_in(UpperCamelCase__ )
A_ = nn.silu(UpperCamelCase__ )
for block in self.blocks:
A_ = block(UpperCamelCase__ )
A_ = nn.silu(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return embedding
@flax_register_to_config
class A__ ( nn.Module , _snake_case , _snake_case ):
lowercase = 32
lowercase = 4
lowercase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase = False
lowercase = (320, 640, 1_280, 1_280)
lowercase = 2
lowercase = 8
lowercase = None
lowercase = 1_280
lowercase = 0.0
lowercase = False
lowercase = jnp.floataa
lowercase = True
lowercase = 0
lowercase = "rgb"
lowercase = (16, 32, 96, 256)
def snake_case_ ( self , UpperCamelCase__ ) -> FrozenDict:
'''simple docstring'''
A_ = (1, self.in_channels, self.sample_size, self.sample_size)
A_ = jnp.zeros(UpperCamelCase__ , dtype=jnp.floataa )
A_ = jnp.ones((1,) , dtype=jnp.intaa )
A_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
A_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
A_ = jnp.zeros(UpperCamelCase__ , dtype=jnp.floataa )
A_ , A_ = jax.random.split(UpperCamelCase__ )
A_ = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )["params"]
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.block_out_channels
A_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A_ = self.num_attention_heads or self.attention_head_dim
# input
A_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A_ = FlaxTimestepEmbedding(UpperCamelCase__ , dtype=self.dtype )
A_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
A_ = self.only_cross_attention
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = (num_attention_heads,) * len(self.down_block_types )
# down
A_ = []
A_ = []
A_ = block_out_channels[0]
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
for i, down_block_type in enumerate(self.down_block_types ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A_ = FlaxCrossAttnDownBlockaD(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
A_ = FlaxDownBlockaD(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCamelCase__ )
for _ in range(self.layers_per_block ):
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
if not is_final_block:
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(UpperCamelCase__ )
A_ = down_blocks
A_ = controlnet_down_blocks
# mid
A_ = block_out_channels[-1]
A_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=UpperCamelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
A_ = nn.Conv(
UpperCamelCase__ , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 1.0 , UpperCamelCase__ = True , UpperCamelCase__ = False , ) -> Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
A_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
A_ = jnp.flip(UpperCamelCase__ , axis=1 )
# 1. time
if not isinstance(UpperCamelCase__ , jnp.ndarray ):
A_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
A_ = timesteps.astype(dtype=jnp.floataa )
A_ = jnp.expand_dims(UpperCamelCase__ , 0 )
A_ = self.time_proj(UpperCamelCase__ )
A_ = self.time_embedding(UpperCamelCase__ )
# 2. pre-process
A_ = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
A_ = self.conv_in(UpperCamelCase__ )
A_ = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
A_ = self.controlnet_cond_embedding(UpperCamelCase__ )
sample += controlnet_cond
# 3. down
A_ = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ , A_ = down_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
else:
A_ , A_ = down_block(UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , deterministic=not train )
        # 5. controlnet blocks
A_ = ()
for down_block_res_sample, controlnet_block in zip(UpperCamelCase__ , self.controlnet_down_blocks ):
A_ = controlnet_block(UpperCamelCase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
A_ = controlnet_down_block_res_samples
A_ = self.controlnet_mid_block(UpperCamelCase__ )
# 6. scaling
A_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=UpperCamelCase__ , mid_block_res_sample=UpperCamelCase__ )
| 702 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
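# Generate the minimal (canonical) Roman numeral for an integer by greedily
# emitting the largest symbols first and using subtractive pairs for the 4s
# and 9s of each decimal place.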
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
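# Project Euler problem 89: read the numerals from p089_roman.txt and count
# how many characters are saved by rewriting each one in minimal form.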
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
with open(os.path.dirname(UpperCAmelCase__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class A__ ( _snake_case ):
lowercase = "xlm-roberta"
def __init__( self , UpperCamelCase__=30522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-1_2 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = initializer_range
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = classifier_dropout
class A__ ( _snake_case ):
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 703 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__lowerCamelCase = (3, 9, -11, 0, 7, 5, 1, -1)
__lowerCamelCase = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class A__ :
lowercase = 42
lowercase = 42
class A__ :
def __init__( self , UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = None
for i in sorted(UpperCamelCase__ , reverse=UpperCamelCase__ ):
A_ = Node(UpperCamelCase__ , self.head )
def __iter__( self ) -> Iterator[int]:
'''simple docstring'''
A_ = self.head
while node:
yield node.data
A_ = node.next_node
def __len__( self ) -> int:
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self ) -> str:
'''simple docstring'''
return " -> ".join([str(UpperCamelCase__ ) for node in self] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> SortedLinkedList:
return SortedLinkedList(list(UpperCAmelCase__ ) + list(UpperCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 704 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because the PyTorch version is too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 667 | 0 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because the PyTorch version is too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 705 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it won't
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 0 |
'''simple docstring'''
import os
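# Scan an n x n grid for the greatest product of four adjacent numbers in any
# direction: vertical, horizontal, or either diagonal (Project Euler 11).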
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = len(grid[0] )
A_ = len(UpperCAmelCase__ )
A_ = 0
A_ = 0
A_ = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(UpperCAmelCase__ ):
for j in range(n_rows - 3 ):
A_ = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
A_ = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
A_ = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
A_ = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
A_ = max(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
if max_product > largest:
A_ = max_product
return largest
def UpperCAmelCase__ ( ) -> Tuple:
A_ = []
with open(os.path.dirname(UpperCAmelCase__ ) + """/grid.txt""" ) as file:
for line in file:
grid.append(line.strip("""\n""" ).split(""" """ ) )
A_ = [[int(UpperCAmelCase__ ) for i in grid[j]] for j in range(len(UpperCAmelCase__ ) )]
return largest_product(UpperCAmelCase__ )
if __name__ == "__main__":
print(solution())
| 706 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
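# note: the denoising loop below runs twice over the full schedule, presumably
# so the scheduler's multistep residual history is fully populated before the
# sample is returned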
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 667 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
A_ = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(UpperCamelCase__ ) , torch_builtin(UpperCamelCase__ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCamelCase__ ) , gelu_new(UpperCamelCase__ ) ) )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
A_ = get_activation("""gelu""" )
A_ = get_activation("""gelu_10""" )
A_ = torch_builtin(UpperCamelCase__ )
A_ = geluaa(UpperCamelCase__ )
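# gelu_10 behaves like gelu but clips its output at 10; below the clip point
# the two activations must agree exactly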
A_ = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCamelCase__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def snake_case_ ( self ) -> int:
'''simple docstring'''
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCamelCase__ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCamelCase__ ):
get_activation(UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = get_activation("""gelu""" )
A_ = 1
A_ = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCamelCase__ ):
A_ = acta.a
| 707 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
A_ = expert_weihts[idx]
print(F'''{key} -> {key.replace("expert/", F"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Convert a Google-style config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
A_ = checkpoints.load_tax_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 667 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[Any]:
# Initialise PyTorch model
A_ = FunnelConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
A_ = FunnelBaseModel(UpperCAmelCase__ ) if base_model else FunnelModel(UpperCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
__lowerCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 708 |
'''simple docstring'''
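# Count the distinct ways to climb a staircase of `number_of_steps` steps when
# each move climbs either 1 or 2 steps; this is the Fibonacci recurrence,
# computed iteratively in O(n) time and O(1) space.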
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__lowerCamelCase = pytest.mark.integration
__lowerCamelCase = {'''comet'''}
__lowerCamelCase = importlib.util.find_spec('''fairseq''') is not None
__lowerCamelCase = {'''code_eval'''}
__lowerCamelCase = os.name == '''nt'''
__lowerCamelCase = {'''bertscore''', '''frugalscore''', '''perplexity'''}
__lowerCamelCase = importlib.util.find_spec('''transformers''') is not None
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
@wraps(UpperCAmelCase__ )
def wrapper(self, UpperCAmelCase__ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self, UpperCAmelCase__ )
return wrapper
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
@wraps(UpperCAmelCase__ )
def wrapper(self, UpperCAmelCase__ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self, UpperCAmelCase__ )
return wrapper
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Union[str, Any]:
@wraps(UpperCAmelCase__ )
def wrapper(self, UpperCAmelCase__ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self, UpperCAmelCase__ )
return wrapper
def UpperCAmelCase__ ( ) -> List[str]:
A_ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
_snake_case , _snake_case , _snake_case )
@local
class A__ ( parameterized.TestCase ):
lowercase = {}
lowercase = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def snake_case_ ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = """[...]"""
A_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , UpperCamelCase__ ) ).module_path )
A_ = datasets.load.import_main_class(metric_module.__name__ , dataset=UpperCamelCase__ )
# check parameters
A_ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(UpperCamelCase__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
A_ = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = """[...]"""
A_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , UpperCamelCase__ ) ).module_path )
# run doctest
with self.use_local_metrics():
A_ = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCamelCase__ ):
yield
else:
yield
@contextmanager
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
def load_local_metric(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ):
return load_metric(os.path.join("""metrics""" , UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ )
with patch("""datasets.load_metric""" ) as mock_load_metric:
A_ = load_local_metric
yield
@classmethod
def snake_case_ ( cls , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
def wrapper(UpperCamelCase__ ):
A_ = contextmanager(UpperCamelCase__ )
A_ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Union[str, Any]:
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""", """""", """""" ) # handle pytest cli flags
class A__ ( _snake_case ):
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
A_ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
import torch
def bert_cos_score_idf(UpperCAmelCase__, UpperCAmelCase__, *UpperCAmelCase__, **UpperCAmelCase__ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCAmelCase__ ) )
# mock get_model, which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
A_ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
def load_from_checkpoint(UpperCAmelCase__ ):
class A__ :
def snake_case_ ( self , UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
assert len(UpperCamelCase__ ) == 2
A_ = [0.19, 0.92]
return scores, sum(UpperCamelCase__ ) / len(UpperCamelCase__ )
return Model()
# mock download_model and load_from_checkpoint, which are supposed to download a comet model
with patch("""comet.download_model""" ) as mock_download_model:
A_ = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
A_ = load_from_checkpoint
yield
def UpperCAmelCase__ ( ) -> Dict:
A_ = load_metric(os.path.join("""metrics""", """seqeval""" ) )
A_ = """ERROR"""
A_ = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(UpperCAmelCase__, match=re.escape(UpperCAmelCase__ ) ):
metric.compute(predictions=[], references=[], scheme=UpperCAmelCase__ )
| 709 |
'''simple docstring'''
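# Project Euler problem 55: a Lychrel number never forms a palindrome through
# repeated reverse-and-add; for this problem a number is assumed to be Lychrel
# if no palindrome appears within fifty iterations.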
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( _snake_case , unittest.TestCase ):
lowercase = DebertaTokenizer
lowercase = True
lowercase = DebertaTokenizerFast
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
A_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A_ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A_ = {"""unk_token""": """[UNK]"""}
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase__ ) )
def snake_case_ ( self , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = """lower newer"""
A_ = """lower newer"""
return input_text, output_text
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.get_tokenizer()
A_ = """lower newer"""
A_ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
A_ = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
A_ = tokens + [tokenizer.unk_token]
A_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.get_tokenizer()
A_ = tokenizer("""Hello""" , """World""" )
A_ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
A_ = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase__ )
A_ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase__ )
A_ = tokenizer.encode(
"""sequence builders""" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
A_ = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
A_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
A_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
A_ = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
A_ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
A_ = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ )
A_ = [tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) for seq in encoding["""input_ids"""]]
# fmt: off
A_ = {
"""input_ids""": [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
A_ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , UpperCamelCase__ )
for expected, decoded in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and are handled
# like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
# returns 1 only if every character in the word is a CJK character (e.g. '身高' or '神'); words with non-CJK characters such as '180' return 0
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
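# Greedily match the longest LTP-segmented Chinese word against the BERT
# tokens; every token after the first one inside a matched word is prefixed
# with "##" so whole-word masking can treat the word as a single unit.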
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
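# Build whole-word-masking reference ids: segment the lines with LTP, tokenize
# them with BERT (both in batches of 100), then record the position of every
# "##"-prefixed Chinese sub-token in each example.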
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
# We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save the Chinese tokens' positions
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
# For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
| 667 | 0 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCamelCase = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
A_ = test_results.split(""" """ )
A_ = 0
A_ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
A_ = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(UpperCAmelCase__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
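# Parse the short-failures report: each "[doctest]" header names the failing
# file, and the first non-numbered line after it is kept as its error message.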
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
A_ = {}
A_ = None
A_ = False
for line in failures_short_lines.split("""\n""" ):
if re.search(r"""_ \[doctest\]""", UpperCAmelCase__ ):
A_ = True
A_ = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
A_ = line
A_ = False
return failures
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = title
A_ = doc_test_results["""time_spent"""].split(""",""" )[0]
A_ = doc_test_results["""success"""]
A_ = doc_test_results["""failures"""]
A_ = self.n_success + self.n_failures
# Failures and success of the modeling tests
A_ = doc_test_results
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = [self._time_spent]
A_ = 0
for time in time_spent:
A_ = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(UpperCamelCase__ ) == 1:
A_ = [0, 0, time_parts[0]]
A_ , A_ , A_ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3600 + minutes * 60 + seconds
A_ , A_ , A_ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f'''{int(UpperCamelCase__ )}h{int(UpperCamelCase__ )}m{int(UpperCamelCase__ )}s'''
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = 40
A_ = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
A_ = """"""
for category, failures in category_failures.items():
if len(UpperCamelCase__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(UpperCamelCase__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(UpperCamelCase__ )
@staticmethod
def snake_case_ ( ) -> Optional[int]:
'''simple docstring'''
A_ = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(UpperCamelCase__ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=UpperCamelCase__ , )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
A_ = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
A_ = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=UpperCamelCase__ , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = """"""
for key, value in failures.items():
A_ = value[:200] + """ [Truncated]""" if len(UpperCamelCase__ ) > 250 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
A_ = job_name
A_ = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
A_ = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def snake_case_ ( self ) -> int:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
A_ = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
A_ = sorted(self.doc_test_results.items() , key=lambda UpperCamelCase__ : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
A_ = f'''*Num failures*: {len(job_result["failed"] )}\n'''
A_ = job_result["""failures"""]
A_ = self.get_reply_blocks(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text=UpperCamelCase__ )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f'''Results for {job}''' , blocks=UpperCamelCase__ , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = os.environ["""GITHUB_RUN_ID"""]
A_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
A_ = requests.get(UpperCAmelCase__ ).json()
A_ = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
A_ = math.ceil((result["""total_count"""] - 1_00) / 1_00 )
for i in range(UpperCAmelCase__ ):
A_ = requests.get(url + F'''&page={i + 2}''' ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""", UpperCAmelCase__ )
return {}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = {}
if os.path.exists(UpperCAmelCase__ ):
A_ = os.listdir(UpperCAmelCase__ )
for file in files:
try:
with open(os.path.join(UpperCAmelCase__, UpperCAmelCase__ ), encoding="""utf-8""" ) as f:
A_ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'''Could not open {os.path.join(UpperCAmelCase__, UpperCAmelCase__ )}.''' ) from e
return _artifact
def UpperCAmelCase__ ( ) -> Optional[Any]:
class A__ :
def __init__( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = name
A_ = []
def __str__( self ) -> Dict:
'''simple docstring'''
return self.name
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
A_ = {}
A_ = filter(os.path.isdir, os.listdir() )
for directory in directories:
A_ = directory
if artifact_name not in _available_artifacts:
A_ = Artifact(UpperCAmelCase__ )
_available_artifacts[artifact_name].add_path(UpperCAmelCase__ )
return _available_artifacts
if __name__ == "__main__":
__lowerCamelCase = get_job_links()
__lowerCamelCase = retrieve_available_artifacts()
__lowerCamelCase = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__lowerCamelCase = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__lowerCamelCase = github_actions_job_links.get('''run_doctests''')
__lowerCamelCase = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
__lowerCamelCase = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = handle_test_results(artifact['''stats'''])
__lowerCamelCase = failed
__lowerCamelCase = success
__lowerCamelCase = time_spent[1:-1] + ''', '''
__lowerCamelCase = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
__lowerCamelCase = line.replace('''FAILED ''', '''''')
__lowerCamelCase = line.split()[0].replace('''\n''', '''''')
if "::" in line:
__lowerCamelCase , __lowerCamelCase = line.split('''::''')
else:
__lowerCamelCase , __lowerCamelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__lowerCamelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__lowerCamelCase = all_failures[test] if test in all_failures else '''N/A'''
__lowerCamelCase = failure
break
__lowerCamelCase = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
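# Quick sanity check for the 6k +/- 1 trial division above (illustrative):
# is_prime(29) -> True (29 = 6*5 - 1), is_prime(33) -> False (33 = 3 * 11)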
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
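# Context: compute_nums searches for odd composites that cannot be written as p + 2*k^2
# (Goldbach's other conjecture, Project Euler 46); the smallest such number is 5777.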
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class A__ :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , UpperCamelCase__=0 , ) -> Dict:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = projection_dim
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
A_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = TFDPRContextEncoder(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = TFDPRQuestionEncoder(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
A_ = TFDPRReader(config=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"""input_ids""": input_ids}
return config, inputs_dict
@require_tf
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
lowercase = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowercase = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = TFDPRModelTester(self )
A_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def snake_case_ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*UpperCamelCase__ )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*UpperCamelCase__ )
@slow
def snake_case_ ( self ) -> str:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFDPRContextEncoder.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFDPRContextEncoder.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFDPRQuestionEncoder.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFDPRReader.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
A_ = tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
A_ = model(UpperCamelCase__ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
A_ = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 712 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0, UpperCAmelCase__ = 0 ) -> int:
A_ = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
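# Illustrative trace: search([5, 1, 9, 3], 9) checks indices 0 and 3, then recurses on
# the inner slice 1..2 and returns 2, the index of the key; -1 is returned when absent.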
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def UpperCAmelCase__ ( UpperCAmelCase__=None ) -> str:
A_ = argparse.ArgumentParser(add_help=UpperCAmelCase__, allow_abbrev=UpperCAmelCase__ )
# The main config parser
A_ = config_command_parser(UpperCAmelCase__ )
# The subparser to add commands to
A_ = config_parser.add_subparsers(title="""subcommands""", dest="""subcommand""" )
# Then add other parsers with the parent parser
default_command_parser(UpperCAmelCase__, parents=[parent_parser] )
update_command_parser(UpperCAmelCase__, parents=[parent_parser] )
return config_parser
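# Resulting CLI shape (illustrative): `accelerate config`, `accelerate config default`,
# and `accelerate config update` are all routed through this parser.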
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = get_config_parser()
A_ = config_parser.parse_args()
if not hasattr(UpperCAmelCase__, """func""" ):
config_parser.print_help()
exit(1 )
# Run
args.func(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
A_ = time.time()
locka.acquire(UpperCAmelCase__ )
assert time.time() - _start > timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = """a""" * 10_00 + """.lock"""
A_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
A_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
locka.acquire(0 )
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
create_state_space_tree(UpperCAmelCase__, [], 0, [0 for i in range(len(UpperCAmelCase__ ) )] )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> None:
if index == len(UpperCAmelCase__ ):
print(UpperCAmelCase__ )
return
for i in range(len(UpperCAmelCase__ ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
A_ = True
create_state_space_tree(UpperCAmelCase__, UpperCAmelCase__, index + 1, UpperCAmelCase__ )
current_sequence.pop()
A_ = False
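# Illustrative trace for [1, 2]: the state-space tree is explored depth-first as
# [] -> [1] -> [1, 2] (printed) -> backtrack -> [2] -> [2, 1] (printed).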
__lowerCamelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowerCamelCase = ['''A''', '''B''', '''C''']
generate_all_permutations(sequence_a)
| 714 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( _snake_case ):
lowercase = 42
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("DownEncoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__=True , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = x
A_ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCamelCase__ )
# middle
A_ = self.mid_block(UpperCamelCase__ )
# post-process
A_ = self.conv_norm_out(UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=3 , UpperCamelCase__=("UpDecoderBlock2D",) , UpperCamelCase__=(64,) , UpperCamelCase__=2 , UpperCamelCase__=32 , UpperCamelCase__="silu" , UpperCamelCase__="group" , ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == """spatial""" else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
A_ = list(reversed(UpperCamelCase__ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCamelCase__ ) - 1
A_ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
A_ = False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Optional[Any]:
'''simple docstring'''
A_ = z
A_ = self.conv_in(UpperCamelCase__ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ ):
def custom_forward(*UpperCamelCase__ ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
A_ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
A_ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCamelCase__ )
else:
A_ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
A_ = self.conv_act(UpperCamelCase__ )
A_ = self.conv_out(UpperCamelCase__ )
return sample
class A__ ( nn.Module ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__="random" , UpperCamelCase__=False , UpperCamelCase__=True ) -> str:
'''simple docstring'''
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
A_ = n_e
A_ = sane_index_shape
def snake_case_ ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = inds.shape
assert len(UpperCamelCase__ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2*e*z
A_ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
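# torch.cdist returns pairwise Euclidean distances, so argmin over dim=1 picks the nearest codebook vector.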
A_ = self.embedding(UpperCamelCase__ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients (straight-through estimator: quantization acts as the identity in the backward pass)
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCamelCase__ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCamelCase__ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCamelCase__ )
if shape is not None:
A_ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A__ ( _snake_case ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False ) -> Dict:
'''simple docstring'''
A_ = parameters
A_ , A_ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -30.0 , 20.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case_ ( self , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def snake_case_ ( self , UpperCamelCase__=None ) -> int:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
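# closed-form KL divergence against a standard normal: 0.5 * sum(mu^2 + var - 1 - log(var))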
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=[1, 2, 3] ) -> Optional[Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
return self.mean
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Union[str, Any]:
# Breadth-first search: return True if the sink t is reachable from the source s in the residual graph.
A_ = [False] * len(UpperCAmelCase__ )
A_ = []
queue.append(UpperCAmelCase__ )
A_ = True
while queue:
A_ = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(UpperCAmelCase__ )
A_ = True
A_ = u
return visited[t]
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
# This array is filled by BFS and used to store the augmenting path
A_ = [-1] * (len(UpperCAmelCase__ ))
A_ = 0
while bfs(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ):
A_ = float("""Inf""" )
A_ = sink
while s != source:
# Find the minimum residual capacity along the selected path
A_ = min(UpperCAmelCase__, graph[parent[s]][s] )
A_ = parent[s]
max_flow += path_flow
A_ = sink
while v != source:
A_ = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
A_ = parent[v]
return max_flow
__lowerCamelCase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
__lowerCamelCase , __lowerCamelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
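# For the classic CLRS example network above, the expected maximum flow is 23.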
| 715 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
# Load configuration defined in the metadata file
with open(UpperCAmelCase__ ) as metadata_file:
A_ = json.load(UpperCAmelCase__ )
A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""]
# Load the entity vocab file
A_ = load_original_entity_vocab(UpperCAmelCase__ )
# add an entry for [MASK2]
A_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f:
A_ = json.load(UpperCAmelCase__ )
A_ = """MLukeTokenizer"""
with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f:
json.dump(UpperCAmelCase__, UpperCAmelCase__ )
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
# Initialize the embeddings of the special tokens
A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
A_ = state_dict["""embeddings.word_embeddings.weight"""]
A_ = word_emb[ent_init_index].unsqueeze(0 )
A_ = word_emb[enta_init_index].unsqueeze(0 )
A_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ = state_dict[bias_name]
A_ = decoder_bias[ent_init_index].unsqueeze(0 )
A_ = decoder_bias[enta_init_index].unsqueeze(0 )
A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A_ = F'''encoder.layer.{layer_index}.attention.self.'''
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
A_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A_ = state_dict["""entity_predictions.bias"""]
A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
A_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
A_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
A_ = state_dict[key]
else:
A_ = state_dict[key]
A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ )
if set(UpperCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(UpperCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" )
A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ = (0, 9)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 33, 7_68) )
A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ = torch.Size((1, 1, 7_68) )
A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ )
A_ = """Tokyo is the capital of <mask>."""
A_ = (24, 30)
A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" )
A_ = model(**UpperCAmelCase__ )
A_ = encoding["""input_ids"""][0].tolist()
A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCAmelCase__ )
A_ = outputs.entity_logits[0][0].argmax().item()
A_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) )
model.save_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""]
A_ = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )]
A_ = {}
for entry in data:
A_ = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ = entity_id
break
A_ = F'''{language}:{entity_name}'''
A_ = entity_id
return new_mapping
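# Illustrative entry (hypothetical ids): {"id": 3, "entities": [["Japan", "en"]]} maps to {"en:Japan": 3};
# names listed in SPECIAL_TOKENS keep their bare form instead of the "language:name" key.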
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__lowerCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 667 | 0 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__lowerCamelCase = '''<<<<<<< This should probably be modified because it mentions: '''
__lowerCamelCase = '''=======
>>>>>>>
'''
__lowerCamelCase = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
__lowerCamelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
return ConvertCommand(args.tfds_path, args.datasets_directory )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = get_logger("""datasets-cli/converting""" )
A_ = tfds_path
A_ = datasets_directory
def snake_case_ ( self ) -> str:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
A_ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
A_ = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
A_ = os.path.abspath(self._datasets_directory )
self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
A_ = []
A_ = []
A_ = {}
if os.path.isdir(self._tfds_path ):
A_ = os.listdir(UpperCamelCase__ )
else:
A_ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'''Looking at file {f_name}''' )
A_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
A_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if not os.path.isfile(UpperCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(UpperCamelCase__ , encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = []
A_ = False
A_ = False
A_ = []
for line in lines:
A_ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
A_ = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
A_ = """"""
continue
elif "from absl import logging" in out_line:
A_ = """from datasets import logging\n"""
elif "getLogger" in out_line:
A_ = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
A_ = True
A_ = list(filter(lambda UpperCamelCase__ : e in out_line , UpperCamelCase__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCamelCase__ ) + """\n""" )
out_lines.append(UpperCamelCase__ )
out_lines.append(UpperCamelCase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
A_ = re.sub(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
A_ = re.match(R"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , UpperCamelCase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
A_ = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
A_ = True
out_lines.append(UpperCamelCase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
A_ = f_name.replace(""".py""" , """""" )
A_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
A_ = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
self._logger.info(f'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(UpperCamelCase__ )
if needs_manual_update:
with_manual_update.append(UpperCamelCase__ )
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.writelines(UpperCamelCase__ )
self._logger.info(f'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
A_ = os.path.basename(UpperCamelCase__ )
A_ = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(f'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(UpperCamelCase__ , UpperCamelCase__ )
except KeyError:
self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 716 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
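# Hypothetical usage sketch (model id and sampling rate are illustrative, not taken from this file):
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt")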
| 667 | 0 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = tf.convert_to_tensor(UpperCAmelCase__ )
A_ = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ), x.dtype ) ))
return x * cdf
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
A_ = tf.convert_to_tensor(UpperCAmelCase__ )
A_ = tf.cast(math.pi, x.dtype )
A_ = tf.cast(0.044_715, x.dtype )
A_ = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCAmelCase__, 3 )) ))
return x * cdf
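# tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))) (Hendrycks & Gimpel, 2016)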
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
A_ = tf.convert_to_tensor(UpperCAmelCase__ )
return x * tf.tanh(tf.math.softplus(UpperCAmelCase__ ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
A_ = tf.convert_to_tensor(UpperCAmelCase__ )
A_ = tf.cast(0.044_715, x.dtype )
A_ = tf.cast(0.7_978_845_608, x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = tf.convert_to_tensor(UpperCAmelCase__ )
A_ = tf.cast(1.702, x.dtype )
return x * tf.math.sigmoid(coeff * x )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
return tf.clip_by_value(_gelu(UpperCAmelCase__ ), -10, 10 )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=-1 ) -> Optional[int]:
A_ , A_ = tf.split(UpperCAmelCase__, 2, axis=UpperCAmelCase__ )
return a * tf.math.sigmoid(UpperCAmelCase__ )
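# GLU (Dauphin et al., 2016): split the input in two along the given axis and gate one half with the sigmoid of the other.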
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return tf.keras.activations.gelu(UpperCAmelCase__, approximate=UpperCAmelCase__ )
__lowerCamelCase = tf.keras.activations.gelu
__lowerCamelCase = approximate_gelu_wrap
else:
__lowerCamelCase = _gelu
__lowerCamelCase = _gelu_new
__lowerCamelCase = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 717 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase__ ( ) -> Dict:
A_ = cn.convert_to_negative(UpperCAmelCase__ )
# assert that the negative_img array contains at least one True value
assert negative_img.any()
def UpperCAmelCase__ ( ) -> List[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around direct image comparison by asserting on the string representation of the result
assert str(cc.change_contrast(UpperCAmelCase__, 1_10 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def UpperCAmelCase__ ( ) -> str:
A_ = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert that the generated Gaussian kernel contains no zero entries
assert resp.all()
def UpperCAmelCase__ ( ) -> Union[str, Any]:
A_ = imread("""digital_image_processing/image_data/lena_small.jpg""", 0 )
# assert that every element of the array is True
assert canny_img.all()
A_ = canny.canny(UpperCAmelCase__ )
# assert that the canny array contains at least one True value
assert canny_array.any()
def UpperCAmelCase__ ( ) -> Dict:
assert gg.gaussian_filter(UpperCAmelCase__, 5, sigma=0.9 ).all()
def UpperCAmelCase__ ( ) -> int:
# Laplacian kernel including diagonal neighbors
A_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
A_ = conv.img_convolve(UpperCAmelCase__, UpperCAmelCase__ ).astype(UpperCAmelCase__ )
assert res.any()
def UpperCAmelCase__ ( ) -> List[Any]:
assert med.median_filter(UpperCAmelCase__, 3 ).any()
def UpperCAmelCase__ ( ) -> List[Any]:
A_ , A_ = sob.sobel_filter(UpperCAmelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase__ ( ) -> List[str]:
A_ = sp.make_sepia(UpperCAmelCase__, 20 )
assert sepia.all()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg" ) -> List[Any]:
A_ = bs.Burkes(imread(UpperCAmelCase__, 1 ), 1_20 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase__ ( UpperCAmelCase__ = "digital_image_processing/image_data/lena_small.jpg", ) -> Optional[int]:
A_ = rs.NearestNeighbour(imread(UpperCAmelCase__, 1 ), 4_00, 2_00 )
nn.process()
assert nn.output.any()
def UpperCAmelCase__ ( ) -> Optional[int]:
A_ = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
A_ = imread(UpperCAmelCase__, 0 )
# Test that get_neighbors_pixel() does not return None
A_ = 0
A_ = 0
A_ = image[x_coordinate][y_coordinate]
A_ = lbp.get_neighbors_pixel(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array with the same height and width as the read image
A_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
A_ = lbp.local_binary_value(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
assert lbp_image.any()
| 667 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A__ ( _snake_case ):
lowercase = "ClapFeatureExtractor"
lowercase = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
A_ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if audios is not None:
A_ = self.feature_extractor(
UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and audios is not None:
A_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.tokenizer.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
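# Minimal usage sketch (illustrative; `ClapProcessor` and the checkpoint name are
# the upstream equivalents of the obfuscated class above, assumed rather than
# taken from this file):
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapProcessor

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio_array = np.zeros(48_000, dtype=np.float32)  # one second of silence
    inputs = processor(
        text=["a dog barking"],
        audios=audio_array,
        sampling_rate=48_000,
        return_tensors="pt",
    )
    print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_features', 'input_ids']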
| 718 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> None:
if point:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__, (int, float) ):
A_ = (
"""Expected a list of numbers as input, found """
F'''{type(UpperCAmelCase__ ).__name__}'''
)
raise TypeError(UpperCAmelCase__ )
else:
A_ = F'''Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}'''
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError("""Missing an input""" )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__, UpperCAmelCase__ ) ) )
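# Worked example (illustrative; upstream the two obfuscated functions above are
# `manhattan_distance` and its one-liner variant, both computing
# sum(|a_i - b_i|)): for [1, 1] and [3, 4] the distance is |1-3| + |1-4| = 5.0.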
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
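# Context note (an assumption derived from the code, not stated in the file):
# this computes the digit-sum sequence a(1) = 1, a(n+1) = a(n) + digitsum(a(n))
# -- 1, 2, 4, 8, 16, 23, 28, 38, 49, ... -- and `solution()` returns the
# 10**15-th term, caching "jumps" per (digitsum(b), c) pair so the sequence is
# never walked term by term.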
if __name__ == "__main__":
print(f"""{solution() = }""")
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
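# Migration sketch (illustrative; the checkpoint name is an assumption):
#
#   from transformers import BeitImageProcessor
#   image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")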
| 667 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A__ ( _snake_case ):
lowercase = 42
lowercase = 42
class A__ ( _snake_case , _snake_case ):
lowercase = 1
@register_to_config
def __init__( self , UpperCamelCase__ = 2000 , UpperCamelCase__ = 0.15 , UpperCamelCase__ = 0.01 , UpperCamelCase__ = 1348.0 , UpperCamelCase__ = 1e-5 , UpperCamelCase__ = 1 , ) -> List[Any]:
'''simple docstring'''
A_ = sigma_max
# setable values
A_ = None
self.set_sigmas(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> Union[str, Any]:
'''simple docstring'''
A_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
A_ = torch.linspace(1 , UpperCamelCase__ , UpperCamelCase__ , device=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None ) -> Optional[int]:
'''simple docstring'''
A_ = sigma_min if sigma_min is not None else self.config.sigma_min
A_ = sigma_max if sigma_max is not None else self.config.sigma_max
A_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(UpperCamelCase__ , UpperCamelCase__ )
A_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
A_ = torch.exp(torch.linspace(math.log(UpperCamelCase__ ) , math.log(UpperCamelCase__ ) , UpperCamelCase__ ) )
A_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[SdeVeOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
A_ = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
A_ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
A_ = timesteps.to(self.discrete_sigmas.device )
A_ = self.discrete_sigmas[timesteps].to(sample.device )
A_ = self.get_adjacent_sigma(UpperCamelCase__ , UpperCamelCase__ ).to(sample.device )
A_ = torch.zeros_like(UpperCamelCase__ )
A_ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
A_ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
A_ = diffusion.unsqueeze(-1 )
A_ = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the SDE
A_ = randn_tensor(
sample.shape , layout=sample.layout , generator=UpperCamelCase__ , device=sample.device , dtype=sample.dtype )
A_ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
A_ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=UpperCamelCase__ , prev_sample_mean=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # For small batch sizes, the paper suggests "replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
A_ = randn_tensor(sample.shape , layout=sample.layout , generator=UpperCamelCase__ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
A_ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
A_ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
A_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
A_ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
A_ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
A_ = step_size.unsqueeze(-1 )
A_ = sample + step_size * model_output
A_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> torch.FloatTensor:
'''simple docstring'''
A_ = timesteps.to(original_samples.device )
A_ = self.discrete_sigmas.to(original_samples.device )[timesteps]
A_ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(UpperCamelCase__ ) * sigmas[:, None, None, None]
)
A_ = noise + original_samples
return noisy_samples
def __len__( self ) -> int:
'''simple docstring'''
return self.config.num_train_timesteps
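# Minimal predictor-corrector sampling sketch (illustrative): the obfuscated
# methods above correspond to `step_correct` / `step_pred` on the upstream
# diffusers `ScoreSdeVeScheduler`, which this demo imports; the score model is
# a stand-in returning -x, not a trained network.
if __name__ == "__main__":
    import torch
    from diffusers import ScoreSdeVeScheduler

    demo_scheduler = ScoreSdeVeScheduler()
    demo_scheduler.set_timesteps(num_inference_steps=10)
    demo_scheduler.set_sigmas(num_inference_steps=10)
    x = torch.randn(1, 3, 8, 8) * demo_scheduler.config.sigma_max
    score = lambda s: -s  # placeholder score estimate
    for t in demo_scheduler.timesteps:
        # a few corrector (Langevin) steps, then one predictor step
        for _ in range(demo_scheduler.config.correct_steps):
            x = demo_scheduler.step_correct(score(x), x).prev_sample
        x = demo_scheduler.step_pred(score(x), t, x).prev_sample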
| 720 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if num < 0:
return False
A_ = num
A_ = 0
while num > 0:
A_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
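# Worked example (illustrative): 121 reverses digit-by-digit to 121 (a
# palindrome), 123 reverses to 321 (not one), and any negative number is
# rejected before the loop runs.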
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class A__ ( unittest.TestCase ):
@require_torch
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
A_ = load_dataset("""ashraq/esc50""" )
A_ = dataset["""train"""]["""audio"""][-1]["""array"""]
A_ = audio_classifier(UpperCamelCase__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
pass
@slow
@require_torch
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
A_ = load_dataset("""ashraq/esc50""" )
A_ = dataset["""train"""]["""audio"""][-1]["""array"""]
A_ = audio_classifier(UpperCamelCase__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
A_ = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
A_ = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 721 |
'''simple docstring'''
__lowerCamelCase = range(2, 20 + 1)
__lowerCamelCase = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase = {}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Tuple:
A_ = sum(a_i[j] for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ) )
A_ = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase__ ), UpperCAmelCase__ ) ) )
A_ , A_ = 0, 0
A_ = n - i
A_ = memo.get(UpperCAmelCase__ )
if sub_memo is not None:
A_ = sub_memo.get(UpperCAmelCase__ )
if jumps is not None and len(UpperCAmelCase__ ) > 0:
# find and make the largest jump without going over
A_ = -1
for _k in range(len(UpperCAmelCase__ ) - 1, -1, -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
A_ = _k
break
if max_jump >= 0:
A_ , A_ , A_ = jumps[max_jump]
# since the difference between jumps is cached, add c
A_ = diff + c
for j in range(min(UpperCAmelCase__, len(UpperCAmelCase__ ) ) ):
A_ , A_ = divmod(UpperCAmelCase__, 10 )
if new_c > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = []
else:
A_ = {c: []}
A_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
A_ , A_ = next_term(UpperCAmelCase__, k - 1, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
A_ , A_ = compute(UpperCAmelCase__, UpperCAmelCase__, i + dn, UpperCAmelCase__ )
diff += _diff
dn += terms_jumped
A_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
A_ = 0
while j < len(UpperCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(UpperCAmelCase__, (diff, dn, k) )
return (diff, dn)
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if i >= n:
return 0, i
if k > len(UpperCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(UpperCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
A_ = i
A_ , A_ , A_ = 0, 0, 0
for j in range(len(UpperCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
A_ = ds_c + ds_b
diff += addend
A_ = 0
for j in range(UpperCAmelCase__ ):
A_ = a_i[j] + addend
A_ , A_ = divmod(UpperCAmelCase__, 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
return diff, i - start_i
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str:
for j in range(UpperCAmelCase__, len(UpperCAmelCase__ ) ):
A_ = digits[j] + addend
if s >= 10:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
A_ = addend // 10 + quotient
else:
A_ = s
A_ = addend // 10
if addend == 0:
break
while addend > 0:
A_ , A_ = divmod(UpperCAmelCase__, 10 )
digits.append(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**15 ) -> int:
A_ = [1]
A_ = 1
A_ = 0
while True:
A_ , A_ = next_term(UpperCAmelCase__, 20, i + dn, UpperCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
A_ = 0
for j in range(len(UpperCAmelCase__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> set[str]:
A_ , A_ = set(UpperCAmelCase__ ), [start]
while stack:
A_ = stack.pop()
explored.add(UpperCAmelCase__ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(UpperCAmelCase__ )
return explored
__lowerCamelCase = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
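# For the graph above the traversal reaches every vertex, so the printed set
# is {'A', 'B', 'C', 'D', 'E', 'F', 'G'} (set display order may vary).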
| 700 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A__ ( tf.keras.layers.Layer ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=1 , UpperCamelCase__=False , **UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A_ = vocab_size
A_ = d_embed
A_ = d_proj
A_ = cutoffs + [vocab_size]
A_ = [0] + self.cutoffs
A_ = div_val
A_ = self.cutoffs[0]
A_ = len(self.cutoffs ) - 1
A_ = self.shortlist_size + self.n_clusters
A_ = keep_order
A_ = []
A_ = []
def snake_case_ ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
A_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_weight""" )
A_ = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' , )
self.out_projs.append(UpperCamelCase__ )
else:
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ = self.d_embed // (self.div_val**i)
A_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_projs_._{i}''' )
self.out_projs.append(UpperCamelCase__ )
A_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._weight''' , )
A_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=UpperCamelCase__ , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(UpperCamelCase__ )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[Any]:
'''simple docstring'''
A_ = x
if proj is not None:
A_ = tf.einsum("""ibd,ed->ibe""" , UpperCamelCase__ , UpperCamelCase__ )
return tf.einsum("""ibd,nd->ibn""" , UpperCamelCase__ , UpperCamelCase__ ) + b
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = shape_list(UpperCamelCase__ )
A_ = tf.range(lp_size[0] , dtype=target.dtype )
A_ = tf.stack([r, target] , 1 )
return tf.gather_nd(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
A_ = 0
if self.n_clusters == 0:
A_ = self._logit(UpperCamelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=UpperCamelCase__ , logits=UpperCamelCase__ )
A_ = tf.nn.log_softmax(UpperCamelCase__ , axis=-1 )
else:
A_ = shape_list(UpperCamelCase__ )
A_ = []
A_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ = (target >= l_idx) & (target < r_idx)
A_ = tf.where(UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ ) - l_idx
if self.div_val == 1:
A_ = self.out_layers[0][0][l_idx:r_idx]
A_ = self.out_layers[0][1][l_idx:r_idx]
else:
A_ = self.out_layers[i][0]
A_ = self.out_layers[i][1]
if i == 0:
A_ = tf.concat([cur_W, self.cluster_weight] , 0 )
A_ = tf.concat([cur_b, self.cluster_bias] , 0 )
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[0] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
else:
A_ = self._logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.out_projs[i] )
A_ = tf.nn.log_softmax(UpperCamelCase__ )
A_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(UpperCamelCase__ )
if target is not None:
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = tf.boolean_mask(UpperCamelCase__ , UpperCamelCase__ )
A_ = self._gather_logprob(UpperCamelCase__ , UpperCamelCase__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(UpperCamelCase__ , -cur_logprob , shape_list(UpperCamelCase__ ) )
A_ = tf.concat(UpperCamelCase__ , axis=-1 )
if target is not None:
if return_mean:
A_ = tf.reduce_mean(UpperCamelCase__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(UpperCamelCase__ )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(UpperCamelCase__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
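# Minimal usage sketch (illustrative; upstream this layer is
# `TFAdaptiveSoftmaxMask` from the TF Transfo-XL model, and the method shown
# last above is its `call` -- those names are assumptions here, since the
# obfuscated class cannot be invoked this way directly):
#
#   import tensorflow as tf
#   softmax = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
#   hidden = tf.random.normal((4, 2, 32))                           # (seq_len, batch, d_proj)
#   target = tf.random.uniform((4, 2), maxval=1000, dtype=tf.int32)
#   log_probs = softmax(hidden, target, return_mean=True, training=True)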
| 667 | 0 |
'''simple docstring'''
from math import isqrt
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
A_ = [True] * max_number
for i in range(2, isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2, UpperCAmelCase__, UpperCAmelCase__ ):
A_ = False
return [i for i in range(2, UpperCAmelCase__ ) if is_prime[i]]
def UpperCAmelCase__ ( UpperCAmelCase__ = 10**8 ) -> int:
A_ = calculate_prime_numbers(max_number // 2 )
A_ = 0
A_ = 0
A_ = len(UpperCAmelCase__ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
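# Worked example (illustrative): with max_number = 30 the primes below 15 are
# [2, 3, 5, 7, 11, 13]; the two-pointer sweep counts the pairs p <= q with
# p * q < 30, i.e. 4, 6, 10, 14, 22, 26 (p=2), then 9, 15, 21 (p=3), then 25
# (p=5), giving 10 semiprimes.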
if __name__ == "__main__":
print(f"""{solution() = }""")
| 701 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
A_ = cst_fwd.get(UpperCAmelCase__, np.inf )
A_ = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
A_ = new_cost_f
A_ = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
A_ = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> int:
A_ = -1
A_ = set()
A_ = set()
A_ = {source: 0}
A_ = {destination: 0}
A_ = {source: None}
A_ = {destination: None}
A_ = PriorityQueue()
A_ = PriorityQueue()
A_ = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
A_ , A_ = queue_forward.get()
visited_forward.add(UpperCAmelCase__ )
A_ , A_ = queue_backward.get()
visited_backward.add(UpperCAmelCase__ )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
A_ = pass_and_relaxation(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
A_ = shortest_distance
return shortest_path_distance
__lowerCamelCase = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
__lowerCamelCase = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
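# Demo (illustrative; `bidirectional_dij` is the clean name for the second
# obfuscated function above): for these graphs the shortest path from "E" to
# "F" has length 3, via E -> G -> F (2 + 1), beating E -> B -> C -> D -> F (4).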
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__lowerCamelCase = logging.get_logger(__name__)
class A__ ( _snake_case ):
lowercase = "AutoTokenizer"
lowercase = ["tokenizer"]
lowercase = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ )
A_ = speaker_embeddings
@classmethod
def snake_case_ ( cls , UpperCamelCase__ , UpperCamelCase__="speaker_embeddings_path.json" , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
A_ = get_file_from_repo(
UpperCamelCase__ , UpperCamelCase__ , subfolder=kwargs.pop("""subfolder""" , UpperCamelCase__ ) , cache_dir=kwargs.pop("""cache_dir""" , UpperCamelCase__ ) , force_download=kwargs.pop("""force_download""" , UpperCamelCase__ ) , proxies=kwargs.pop("""proxies""" , UpperCamelCase__ ) , resume_download=kwargs.pop("""resume_download""" , UpperCamelCase__ ) , local_files_only=kwargs.pop("""local_files_only""" , UpperCamelCase__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , UpperCamelCase__ ) , revision=kwargs.pop("""revision""" , UpperCamelCase__ ) , )
if speaker_embeddings_path is None:
logger.warning(
                f'''`{os.path.join(UpperCamelCase__ , UpperCamelCase__ )}` does not exist;
                no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
A_ = None
else:
with open(UpperCamelCase__ ) as speaker_embeddings_json:
A_ = json.load(UpperCamelCase__ )
else:
A_ = None
A_ = AutoTokenizer.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
return cls(tokenizer=UpperCamelCase__ , speaker_embeddings=UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__="speaker_embeddings_path.json" , UpperCamelCase__="speaker_embeddings" , UpperCamelCase__ = False , **UpperCamelCase__ , ) -> Optional[int]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCamelCase__ , UpperCamelCase__ , """v2""" ) , exist_ok=UpperCamelCase__ )
A_ = {}
A_ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A_ = self._load_voice_preset(UpperCamelCase__ )
A_ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , UpperCamelCase__ , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=UpperCamelCase__ , )
A_ = os.path.join(UpperCamelCase__ , f'''{prompt_key}_{key}.npy''' )
A_ = tmp_dict
with open(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , """w""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
super().save_pretrained(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ = None , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = self.speaker_embeddings[voice_preset]
A_ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
A_ = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , UpperCamelCase__ ) , cache_dir=kwargs.pop("""cache_dir""" , UpperCamelCase__ ) , force_download=kwargs.pop("""force_download""" , UpperCamelCase__ ) , proxies=kwargs.pop("""proxies""" , UpperCamelCase__ ) , resume_download=kwargs.pop("""resume_download""" , UpperCamelCase__ ) , local_files_only=kwargs.pop("""local_files_only""" , UpperCamelCase__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , UpperCamelCase__ ) , revision=kwargs.pop("""revision""" , UpperCamelCase__ ) , )
if path is None:
raise ValueError(
                f'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist;
                no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                embeddings.''' )
A_ = np.load(UpperCamelCase__ )
return voice_preset_dict
def snake_case_ ( self , UpperCamelCase__ = None ) -> int:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="pt" , UpperCamelCase__=256 , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , **UpperCamelCase__ , ) -> List[Any]:
'''simple docstring'''
if voice_preset is not None and not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if (
isinstance(UpperCamelCase__ , UpperCamelCase__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A_ = self._load_voice_preset(UpperCamelCase__ )
else:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not voice_preset.endswith(""".npz""" ):
A_ = voice_preset + """.npz"""
A_ = np.load(UpperCamelCase__ )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCamelCase__ , **UpperCamelCase__ )
A_ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
A_ = self.tokenizer(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , padding="""max_length""" , max_length=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
if voice_preset is not None:
A_ = voice_preset
return encoded_text
| 702 |
'''simple docstring'''
import os
__lowerCamelCase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = 0
A_ = 0
while index < len(UpperCAmelCase__ ) - 1:
A_ = SYMBOLS[numerals[index]]
A_ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
A_ = """"""
A_ = num // 10_00
numerals += m_count * "M"
num %= 10_00
A_ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
A_ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCAmelCase__ ( UpperCAmelCase__ = "/p089_roman.txt" ) -> int:
A_ = 0
with open(os.path.dirname(UpperCAmelCase__ ) + roman_numerals_filename ) as filea:
A_ = filea.readlines()
for line in lines:
A_ = line.strip()
A_ = parse_roman_numerals(UpperCAmelCase__ )
A_ = generate_roman_numerals(UpperCAmelCase__ )
savings += len(UpperCAmelCase__ ) - len(UpperCAmelCase__ )
return savings
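# Worked example (illustrative): "XXXXVIIII" parses to 49; the minimal form
# generated for 49 is "XLIX", so that line saves
# len("XXXXVIIII") - len("XLIX") = 9 - 4 = 5 characters.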
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
import math
import random
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
__lowerCamelCase = 0.02
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> float:
A_ = float(2 * (random.randint(1, 1_00 )) - 1 )
for _ in range(UpperCAmelCase__ ):
# Forward propagation
A_ = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
A_ = (expected / 1_00) - layer_a
# Error delta
A_ = layer_1_error * sigmoid_function(UpperCAmelCase__, UpperCAmelCase__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
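# Behaviour note (illustrative, not asserted by the file): each iteration
# nudges `weight` so that sigmoid(INITIAL_VALUE * weight) * 100 approaches
# `expected`; e.g. forward_propagation(32, 450_000) typically lands close to
# 32, though the random initial weight makes the exact value nondeterministic.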
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = int(input('''Expected value: '''))
__lowerCamelCase = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 703 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 0 |
'''simple docstring'''
import math
from collections.abc import Callable
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> float:
A_ = xa
A_ = xa
while True:
if x_n == x_na or function(UpperCAmelCase__ ) == function(UpperCAmelCase__ ):
raise ZeroDivisionError("""float division by zero, could not find root""" )
A_ = x_na - (
function(UpperCAmelCase__ ) / ((function(UpperCAmelCase__ ) - function(UpperCAmelCase__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
A_ = x_na
A_ = x_na
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> float:
return math.pow(UpperCAmelCase__, 3 ) - (2 * x) - 5
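# Sanity check (illustrative): f(x) = x**3 - 2x - 5 has its real root near
# x = 2.0945514..., so intersection(f, 3, 3.5) should converge to roughly
# 2.09455 within the 1e-5 stopping tolerance.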
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 704 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
return EnvironmentCommand()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return EnvironmentCommand(args.accelerate_config_file )
class A__ ( _snake_case ):
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCamelCase__ )
download_parser.add_argument(
"""--accelerate-config_file""" , default=UpperCamelCase__ , help="""The accelerate config file to use for the default values in the launching script.""" , )
download_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ , *UpperCamelCase__ ) -> None:
'''simple docstring'''
A_ = accelerate_config_file
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """not installed"""
if is_safetensors_available():
import safetensors
A_ = safetensors.__version__
elif importlib.util.find_spec("""safetensors""" ) is not None:
import safetensors
A_ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
A_ = """not installed"""
A_ = A_ = """not found"""
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
A_ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(UpperCamelCase__ ):
A_ = load_config_from_file(self._accelerate_config_file ).to_dict()
A_ = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCamelCase__ , UpperCamelCase__ )
else f'''\t{accelerate_config}'''
)
A_ = """not installed"""
A_ = """NA"""
if is_torch_available():
import torch
A_ = torch.__version__
A_ = torch.cuda.is_available()
A_ = """not installed"""
A_ = """NA"""
if is_tf_available():
import tensorflow as tf
A_ = tf.__version__
try:
# deprecated in v2.1
A_ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
A_ = bool(tf.config.list_physical_devices("""GPU""" ) )
A_ = """not installed"""
A_ = """not installed"""
A_ = """not installed"""
A_ = """NA"""
if is_flax_available():
import flax
import jax
import jaxlib
A_ = flax.__version__
A_ = jax.__version__
A_ = jaxlib.__version__
A_ = jax.lib.xla_bridge.get_backend().platform
A_ = {
"""`transformers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Huggingface_hub version""": huggingface_hub.__version__,
"""Safetensors version""": f'''{safetensors_version}''',
"""Accelerate version""": f'''{accelerate_version}''',
"""Accelerate config""": f'''{accelerate_config_str}''',
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Tensorflow version (GPU?)""": f'''{tf_version} ({tf_cuda_available})''',
"""Flax version (CPU?/GPU?/TPU?)""": f'''{flax_version} ({jax_backend})''',
"""Jax version""": f'''{jax_version}''',
"""JaxLib version""": f'''{jaxlib_version}''',
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCamelCase__ ) )
return info
@staticmethod
def snake_case_ ( UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
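# Usage note (illustrative): this command is normally invoked from the shell
# as `transformers-cli env`, which prints the environment table above for
# copy-pasting into GitHub issues.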
| 667 | 0 |
'''simple docstring'''
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
A_ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1 / 1_23_45 ) -> int:
A_ = 0
A_ = 0
A_ = 3
while True:
A_ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(UpperCAmelCase__ ):
A_ = int(UpperCAmelCase__ )
total_partitions += 1
if check_partition_perfect(UpperCAmelCase__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(UpperCAmelCase__ )
integer += 1
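# Math note (an assumption derived from the code rather than stated in it):
# for odd n the candidate (n**2 - 1) / 4 is always an integer, and
# check_partition_perfect flags exactly the values p = 2**k * (2**k - 1),
# since then sqrt(4p + 1) = 2**(k+1) - 1 makes the log2 expression an integer.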
if __name__ == "__main__":
print(f"""{solution() = }""")
| 705 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _snake_case , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return 100
@property
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A_ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
A_ = PriorTransformer(**UpperCamelCase__ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
A_ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
A_ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
A_ = CLIPVisionModelWithProjection(UpperCamelCase__ )
return model
@property
def snake_case_ ( self ) -> str:
'''simple docstring'''
A_ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.dummy_prior
A_ = self.dummy_image_encoder
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_image_processor
A_ = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , clip_sample_range=10.0 , )
A_ = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
if str(UpperCamelCase__ ).startswith("""mps""" ):
A_ = torch.manual_seed(UpperCamelCase__ )
else:
A_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A_ = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = """cpu"""
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**UpperCamelCase__ )
A_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A_ = output.image_embeds
A_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A_ = image[0, -10:]
A_ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
A_ = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = True
A_ = False
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
@skip_mps
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = torch_device == """cpu"""
A_ = False
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , test_mean_pixel_difference=UpperCamelCase__ , )
| 667 | 0 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = np.full((len(UpperCAmelCase__ ), sequence_length, 2), UpperCAmelCase__ )
else:
A_ = np.full((len(UpperCAmelCase__ ), sequence_length), UpperCAmelCase__ )
for i, tensor in enumerate(UpperCAmelCase__ ):
if padding_side == "right":
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = tensor[:sequence_length]
else:
A_ = tensor[:sequence_length]
else:
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = tensor[:sequence_length]
else:
A_ = tensor[:sequence_length]
return out_tensor.tolist()
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
A_ = ord(UpperCAmelCase__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
A_ = unicodedata.category(UpperCAmelCase__ )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class A__ ( _snake_case ):
lowercase = 42
lowercase = True
lowercase = None
lowercase = None
lowercase = -100
lowercase = "pt"
def snake_case_ ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
import torch
A_ = """label""" if """label""" in features[0].keys() else """labels"""
A_ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
A_ = self.tokenizer.pad(
UpperCamelCase__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
A_ = torch.tensor(batch["""entity_ids"""] ).shape[1]
A_ = self.tokenizer.padding_side
if padding_side == "right":
A_ = [
list(UpperCamelCase__ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase__ )) for label in labels
]
else:
A_ = [
[self.label_pad_token_id] * (sequence_length - len(UpperCamelCase__ )) + list(UpperCamelCase__ ) for label in labels
]
A_ = [feature["""ner_tags"""] for feature in features]
A_ = padding_tensor(UpperCamelCase__ , -1 , UpperCamelCase__ , UpperCamelCase__ )
A_ = [feature["""original_entity_spans"""] for feature in features]
A_ = padding_tensor(UpperCamelCase__ , (-1, -1) , UpperCamelCase__ , UpperCamelCase__ )
A_ = {k: torch.tensor(UpperCamelCase__ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 706 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( _snake_case ):
lowercase = (IPNDMScheduler,)
lowercase = (("num_inference_steps", 50),)
def snake_case_ ( self , **UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
A_ = {"""num_train_timesteps""": 1000}
config.update(**UpperCamelCase__ )
return config
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def snake_case_ ( self , UpperCamelCase__=0 , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
A_ = dummy_past_residuals[:]
if time_step is None:
A_ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
A_ = scheduler_class.from_pretrained(UpperCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
A_ = dummy_past_residuals[:]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(**UpperCamelCase__ )
A_ = scheduler_class(**UpperCamelCase__ )
A_ = 10
A_ = self.dummy_model()
A_ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
A_ = model(UpperCamelCase__ , UpperCamelCase__ )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
A_ = dict(self.forward_default_kwargs )
A_ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCamelCase__ )
A_ = self.dummy_sample
A_ = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps""" ):
A_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
A_ = dummy_past_residuals[:]
A_ = scheduler.timesteps[5]
A_ = scheduler.timesteps[6]
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
A_ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Any:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__ )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = self.full_loop()
A_ = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 667 | 0 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class A__ ( unittest.TestCase ):
def snake_case_ ( self ) -> Any:
'''simple docstring'''
A_ = 0
A_ = [0]
A_ = [0]
A_ = len(UpperCamelCase__ )
self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 0 )
A_ = [60]
A_ = [10]
A_ = len(UpperCamelCase__ )
self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 0 )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
A_ = 3
A_ = [1, 2, 3]
A_ = [3, 2, 1]
A_ = len(UpperCamelCase__ )
self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 5 )
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = 50
A_ = [60, 100, 120]
A_ = [10, 20, 30]
A_ = len(UpperCamelCase__ )
self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 220 )
if __name__ == "__main__":
unittest.main()
| 707 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
A_ = list(s_dict.keys() )
for key in keys:
A_ = r""".*/layers_(\d+)"""
A_ = key
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", UpperCAmelCase__ )
A_ = r"""(encoder|decoder)\/"""
if re.match(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = re.match(UpperCAmelCase__, UpperCAmelCase__ ).groups()
if groups[0] == "encoder":
A_ = re.sub(r"""/mlp/""", r"""/1/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", UpperCAmelCase__ )
elif groups[0] == "decoder":
A_ = re.sub(r"""/mlp/""", r"""/2/mlp/""", UpperCAmelCase__ )
A_ = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", UpperCAmelCase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A_ = new_key.replace(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''{key} -> {new_key}''' )
A_ = s_dict.pop(UpperCAmelCase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A_ = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A_ = s_dict[key].shape[0]
A_ = s_dict[key]
for idx in range(UpperCAmelCase__ ):
                A_ = expert_weights[idx]
                print(F'''{key} -> {key.replace('expert/', F"experts/expert_{idx}/" )}''' )
s_dict.pop(UpperCAmelCase__ )
return s_dict
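# Illustrative only: behaviour of the `layers_(\d+)` rewrite above on a
# hypothetical checkpoint key (not taken from a real checkpoint):
#     re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_3/mlp/wi/kernel")
#     -> "encoder/block/3/layer/mlp/wi/kernel"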
__lowerCamelCase = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]:
    # Convert a Google-style config to the Hugging Face format
import regex as re
with open(UpperCAmelCase__, """r""" ) as f:
A_ = f.read()
A_ = re.findall(r"""(.*) = ([0-9.]*)""", UpperCAmelCase__ )
A_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A_ = float(UpperCAmelCase__ ) if """.""" in value else int(UpperCAmelCase__ )
A_ = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", UpperCAmelCase__ )[0]
A_ = str(activation[1] )
A_ = num_experts
A_ = SwitchTransformersConfig(**UpperCAmelCase__ )
return config
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__=None, UpperCAmelCase__="./", UpperCAmelCase__=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
    A_ = checkpoints.load_t5x_checkpoint(UpperCAmelCase__ )
if gin_file is not None:
A_ = convert_gin_to_config(UpperCAmelCase__, UpperCAmelCase__ )
else:
A_ = SwitchTransformersConfig.from_pretrained(UpperCAmelCase__ )
A_ = SwitchTransformersForConditionalGeneration(UpperCAmelCase__ )
A_ = flax_params["""target"""]
A_ = flatten_dict(UpperCAmelCase__, sep="""/""" )
A_ = rename_keys(UpperCAmelCase__ )
A_ = unflatten_dict(UpperCAmelCase__, sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase__, UpperCAmelCase__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
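# Example invocation (file name and paths are hypothetical):
#     python convert_switch_transformers_checkpoint.py \
#         --switch_t5x_checkpoint_path /tmp/switch_base_8/checkpoint_500000 \
#         --gin_file /tmp/switch_base_8/config.gin \
#         --pytorch_dump_folder_path /tmp/switch_base_8_pt \
#         --num_experts 8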
| 667 | 0 |
'''simple docstring'''
from math import log2
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
    if UpperCAmelCase__ < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif not isinstance(UpperCAmelCase__, int ):
        raise TypeError("""Input value must be an 'int' type""" )
    return 0 if (UpperCAmelCase__ == 0) else int(log2(UpperCAmelCase__ & -UpperCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
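# Worked example: for 36 (0b100100), 36 & -36 == 4 and log2(4) == 2, so the
# function returns 2 -- the index of the lowest set bit.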
| 708 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
assert (
isinstance(UpperCAmelCase__, UpperCAmelCase__ ) and number_of_steps > 0
), F'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
if number_of_steps == 1:
return 1
A_ , A_ = 1, 1
for _ in range(number_of_steps - 1 ):
A_ , A_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
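# Worked example: 4 steps allow 5 distinct climbs
# (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2), matching the Fibonacci recurrence above.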
| 667 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__=7 , UpperCamelCase__=3 , UpperCamelCase__=18 , UpperCamelCase__=30 , UpperCamelCase__=400 , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=[0.5, 0.5, 0.5] , UpperCamelCase__=[0.5, 0.5, 0.5] , ) -> List[str]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size if size is not None else {"""height""": 18, """width""": 20}
A_ = do_thumbnail
A_ = do_align_axis
A_ = do_pad
A_ = do_normalize
A_ = image_mean
A_ = image_std
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class A__ ( _snake_case , unittest.TestCase ):
lowercase = DonutImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = DonutImageProcessingTester(self )
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_thumbnail""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_align_long_axis""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_pad""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) )
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@is_flaky()
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def snake_case_ ( self ) -> int:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
A_ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
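# A minimal usage sketch of the processor exercised above (values illustrative):
#     processor = DonutImageProcessor(size={"height": 18, "width": 20})
#     pixel_values = processor(images, return_tensors="pt").pixel_values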
| 709 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
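# Worked example: 47 + 74 = 121 is a palindrome after one iteration, so 47 is
# not counted; 196 reaches no palindrome within 50 iterations and is counted.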
| 667 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 710 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> List[Any]:
# word like '180' or '身高' or '神'
for char in word:
A_ = ord(UpperCAmelCase__ )
if not _is_chinese_char(UpperCAmelCase__ ):
return 0
return 1
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = set()
for token in tokens:
A_ = len(UpperCAmelCase__ ) > 1 and is_chinese(UpperCAmelCase__ )
if chinese_word:
word_set.add(UpperCAmelCase__ )
A_ = list(UpperCAmelCase__ )
return word_list
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
if not chinese_word_set:
return bert_tokens
A_ = max([len(UpperCAmelCase__ ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(UpperCAmelCase__ )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start, UpperCAmelCase__ )
for i in range(UpperCAmelCase__, 1, -1 ):
A_ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
A_ = """##""" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
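# Illustrative behaviour of the whole-word marking above (toy inputs): with BERT
# tokens ["中", "国", "人"] and the LTP word set {"中国"}, the continuation
# character of the matched word gains the "##" prefix: ["中", "##国", "人"].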
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> Dict:
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=["""cws"""] ).cws
A_ = [get_chinese_word(UpperCAmelCase__ ) for r in res]
ltp_res.extend(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for i in range(0, len(UpperCAmelCase__ ), 1_00 ):
A_ = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=UpperCAmelCase__, truncation=UpperCAmelCase__, max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
A_ = []
for input_ids, chinese_word in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(UpperCAmelCase__ )
input_tokens.append(UpperCAmelCase__ )
A_ = add_sub_symbol(UpperCAmelCase__, UpperCAmelCase__ )
A_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(UpperCAmelCase__ ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(UpperCAmelCase__ ) == 1 and _is_chinese_char(ord(UpperCAmelCase__ ) ):
ref_id.append(UpperCAmelCase__ )
ref_ids.append(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
return ref_ids
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Optional[Any]:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name, """r""", encoding="""utf-8""" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(UpperCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    A_ = LTP(args.ltp ) # faster on a GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
with open(args.save_path, """w""", encoding="""utf-8""" ) as f:
A_ = [json.dumps(UpperCAmelCase__ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
__lowerCamelCase = parser.parse_args()
main(args)
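# Example invocation (script name and paths are hypothetical):
#     python prepare_chinese_ref.py --file_name ./data/zh.txt \
#         --ltp ./resources/ltp --bert ./resources/robert --save_path ./ref.txt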
| 667 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A__ ( _snake_case ):
lowercase = "esm"
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1026 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-1_2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = initializer_range
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = emb_layer_norm_before
A_ = token_dropout
A_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
A_ = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ = EsmFoldConfig(**UpperCamelCase__ )
A_ = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
A_ = get_default_vocab_list()
else:
A_ = vocab_list
else:
A_ = None
A_ = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , UpperCamelCase__ ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
A_ = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
A_ = self.esmfold_config.to_dict()
return output
@dataclass
class A__ :
lowercase = None
lowercase = True
lowercase = False
lowercase = False
lowercase = False
lowercase = 0
lowercase = True
lowercase = False
lowercase = 128
lowercase = None
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
if self.trunk is None:
A_ = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
A_ = TrunkConfig(**self.trunk )
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
A_ = asdict(self )
A_ = self.trunk.to_dict()
return output
@dataclass
class A__ :
lowercase = 48
lowercase = 1_024
lowercase = 128
lowercase = 32
lowercase = 32
lowercase = 32
lowercase = 0
lowercase = 0
lowercase = False
lowercase = 4
lowercase = 128
lowercase = None
def snake_case_ ( self ) -> Any:
'''simple docstring'''
if self.structure_module is None:
A_ = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
A_ = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
A_ = self.sequence_state_dim // self.sequence_head_width
A_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
A_ = asdict(self )
A_ = self.structure_module.to_dict()
return output
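# Illustrative arithmetic for the head-width checks above: with the defaults
# sequence_state_dim=1024 and sequence_head_width=32, sequence_num_heads is
# 1024 // 32 == 32 and 32 * 32 == 1024, so validation passes.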
@dataclass
class A__ :
lowercase = 384
lowercase = 128
lowercase = 16
lowercase = 128
lowercase = 12
lowercase = 4
lowercase = 8
lowercase = 0.1
lowercase = 8
lowercase = 1
lowercase = 2
lowercase = 7
lowercase = 10
lowercase = 1e-8
lowercase = 1e5
def snake_case_ ( self ) -> Any:
'''simple docstring'''
return asdict(self )
def UpperCAmelCase__ ( ) -> List[str]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 711 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase__ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCamelCase = [num for num in range(3, 10_0001, 2) if not is_prime(num)]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[int]:
if not isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
A_ = []
for num in range(len(UpperCAmelCase__ ) ):
A_ = 0
while 2 * i * i <= odd_composites[num]:
A_ = odd_composites[num] - 2 * i * i
if is_prime(UpperCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCAmelCase__ ) == n:
return list_nums
return []
def UpperCAmelCase__ ( ) -> int:
return compute_nums(1 )[0]
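# Worked check for one candidate: 33 = 31 + 2 * 1**2 and 31 is prime, so 33
# satisfies the conjecture and the search moves on to the next odd composite.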
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> str:
A_ = [[] for _ in range(UpperCAmelCase__ )]
A_ = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(UpperCAmelCase__ ) <= key:
return input_string
for position, character in enumerate(UpperCAmelCase__ ):
A_ = position % (lowest * 2) # puts it in bounds
A_ = min(UpperCAmelCase__, lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(UpperCAmelCase__ )
A_ = ["""""".join(UpperCAmelCase__ ) for row in temp_grid]
A_ = """""".join(UpperCAmelCase__ )
return output_string
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> str:
A_ = []
A_ = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
A_ = [[] for _ in range(UpperCAmelCase__ )] # generates template
for position in range(len(UpperCAmelCase__ ) ):
A_ = position % (lowest * 2) # puts it in bounds
A_ = min(UpperCAmelCase__, lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
A_ = 0
for row in temp_grid: # fills in the characters
A_ = input_string[counter : counter + len(UpperCAmelCase__ )]
grid.append(list(UpperCAmelCase__ ) )
counter += len(UpperCAmelCase__ )
A_ = """""" # reads as zigzag
for position in range(len(UpperCAmelCase__ ) ):
A_ = position % (lowest * 2) # puts it in bounds
A_ = min(UpperCAmelCase__, lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> dict[int, str]:
A_ = {}
for key_guess in range(1, len(UpperCAmelCase__ ) ): # tries every key
A_ = decrypt(UpperCAmelCase__, UpperCAmelCase__ )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
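# Worked round trip (key = 3, computed by hand):
#     encrypt("Hello, World!", 3) == "Hoo!el,Wrdl l"
#     decrypt("Hoo!el,Wrdl l", 3) == "Hello, World!"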
| 712 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = 0, UpperCAmelCase__ = 0 ) -> int:
A_ = right or len(UpperCAmelCase__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(UpperCAmelCase__, UpperCAmelCase__, left + 1, right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
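# Worked example: search([1, 0, 5, 7, 9], 5) compares both ends, recurses
# inward, and returns index 2.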
| 667 | 0 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list:
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(UpperCAmelCase__ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
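# Worked example: "abc" -> ["Abc", "aBc", "abC"]; non-alphabetic positions are
# skipped by the isalpha() guard.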
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = FileLock(str(tmpdir / """foo.lock""" ) )
A_ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
A_ = time.time()
locka.acquire(UpperCAmelCase__ )
assert time.time() - _start > timeout
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Dict:
A_ = """a""" * 10_00 + """.lock"""
A_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_55
A_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase__ ):
locka.acquire(0 )
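# Minimal sketch of the locking pattern under test (lock path illustrative):
#     lock = FileLock("/tmp/demo.lock")
#     with lock.acquire(timeout=0.01):
#         ...  # exclusive section; another FileLock on the same file raises Timeout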
| 667 | 0 |