"""simple docstring"""
from __future__ import annotations
_a = tuple[int, int, int]
_a = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_a = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
_a = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
_a = """FOBHMDKEXQNRAULPGSJVTYICZW"""
_a = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
_a = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
_a = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
_a = """SGLCPQWZHKXAREONTFBVIYJUDM"""
_a = """HVSICLTYKQUBXDWAJZOMFGPREN"""
_a = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
_a = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
_a = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Optional[int]:
"""simple docstring"""
if (unique_rotsel := len(set(lowerCAmelCase__ ) )) < 3:
_UpperCamelCase = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(lowerCAmelCase__ )
# Checks if rotor positions are valid
_UpperCamelCase = rotpos
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
_UpperCamelCase = F'''First rotor position is not within range of 1..26 ({rotorposa}'''
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
_UpperCamelCase = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCAmelCase__ )
if not 0 < rotorposa <= len(lowerCAmelCase__ ):
_UpperCamelCase = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(lowerCAmelCase__ )
# Validates string and returns dict
_UpperCamelCase = _plugboard(lowerCAmelCase__ )
return rotpos, rotsel, pbdict
def lowerCamelCase__ ( __snake_case ) -> List[str]:
"""simple docstring"""
if not isinstance(lowerCAmelCase__, lowerCAmelCase__ ):
_UpperCamelCase = F'''Plugboard setting isn\'t type string ({type(lowerCAmelCase__ )})'''
raise TypeError(lowerCAmelCase__ )
elif len(lowerCAmelCase__ ) % 2 != 0:
_UpperCamelCase = F'''Odd number of symbols ({len(lowerCAmelCase__ )})'''
raise Exception(lowerCAmelCase__ )
elif pbstring == "":
return {}
pbstring.replace(''' ''', '''''' )
# Checks if all characters are unique
_UpperCamelCase = set()
for i in pbstring:
if i not in abc:
_UpperCamelCase = F'''\'{i}\' not in list of symbols'''
raise Exception(lowerCAmelCase__ )
elif i in tmppbl:
_UpperCamelCase = F'''Duplicate symbol ({i})'''
raise Exception(lowerCAmelCase__ )
else:
tmppbl.add(lowerCAmelCase__ )
del tmppbl
# Created the dictionary
_UpperCamelCase = {}
for j in range(0, len(lowerCAmelCase__ ) - 1, 2 ):
_UpperCamelCase = pbstring[j + 1]
_UpperCamelCase = pbstring[j]
return pb
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case = (rotora, rotora, rotora), __snake_case = "", ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = text.upper()
_UpperCamelCase = _validator(
lowerCAmelCase__, lowerCAmelCase__, plugb.upper() )
_UpperCamelCase = rotor_position
_UpperCamelCase = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_UpperCamelCase = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_UpperCamelCase = plugboard[symbol]
# rotor ra --------------------------
_UpperCamelCase = abc.index(lowerCAmelCase__ ) + rotorposa
_UpperCamelCase = rotora[index % len(lowerCAmelCase__ )]
# rotor rb --------------------------
_UpperCamelCase = abc.index(lowerCAmelCase__ ) + rotorposa
_UpperCamelCase = rotora[index % len(lowerCAmelCase__ )]
# rotor rc --------------------------
_UpperCamelCase = abc.index(lowerCAmelCase__ ) + rotorposa
_UpperCamelCase = rotora[index % len(lowerCAmelCase__ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_UpperCamelCase = reflector[symbol]
# 2nd rotors
_UpperCamelCase = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
_UpperCamelCase = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
_UpperCamelCase = abc[rotora.index(lowerCAmelCase__ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_UpperCamelCase = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
_UpperCamelCase = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
_UpperCamelCase = 0
rotorposa += 1
if rotorposa >= len(lowerCAmelCase__ ):
_UpperCamelCase = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
_a = """This is my Python script that emulates the Enigma machine from WWII."""
_a = (1, 1, 1)
_a = """pictures"""
_a = (rotora, rotora, rotora)
_a = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
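    # Illustrative check (an addition, not part of the original module):
    # compare a sample text's frequency order against English's ETAOIN order.
    sample = "The quick brown fox jumps over the lazy dog"
    print("frequency order:", get_frequency_order(sample))
    print("match score (0..12):", english_freq_match_score(sample))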
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = filter(lambda UpperCAmelCase : p.requires_grad , model.parameters() )
a_ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCamelCase_ = logging.getLogger(__name__)
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
if metric == "rouge2":
a_ = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
a_ = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
a_ = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
" function." )
a_ = ModelCheckpoint(
dirpath=a__ , filename=a__ , monitor=F'''val_{metric}''' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
return EarlyStopping(
monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=a__ , verbose=a__ , )
class SCREAMING_SNAKE_CASE__ ( pl.Callback ):
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->int:
a_ = {F'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lowercase_)
@rank_zero_only
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True) ->None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''')
a_ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
# Log results
a_ = Path(pl_module.hparams.output_dir)
if type_path == "test":
a_ = od / "test_results.txt"
a_ = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
a_ = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
a_ = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=lowercase_)
generations_file.parent.mkdir(exist_ok=lowercase_)
with open(lowercase_ , "a+") as writer:
for key in sorted(lowercase_):
if key in ["log", "progress_bar", "preds"]:
continue
a_ = metrics[key]
if isinstance(lowercase_ , torch.Tensor):
a_ = val.item()
a_ = F'''{key}: {val:.6f}\n'''
writer.write(lowercase_)
if not save_generations:
return
if "preds" in metrics:
a_ = "\n".join(metrics["preds"])
generations_file.open("w+").write(lowercase_)
@rank_zero_only
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Dict:
try:
a_ = pl_module.model.model.num_parameters()
except AttributeError:
a_ = pl_module.model.num_parameters()
a_ = count_trainable_parameters(lowercase_)
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6})
@rank_zero_only
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(lowercase_ , lowercase_ , "test")
@rank_zero_only
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 369 |
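
# Hypothetical wiring sketch (an addition; `args` and the Lightning module are
# assumed to be defined elsewhere in the training script):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(args.output_dir, metric="rouge2"),
#             get_early_stopping_callback(metric="rouge2", patience=3),
#         ]
#     )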
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 303 | 0 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
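
# Brief usage sketch (an addition, not part of the module): judging by the
# checkpoint names in the archive map above, a smaller variant would be
# configured along these lines:
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)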
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
def sylvester(number: int) -> int:
    """Calculates the number at the given position in Sylvester's sequence."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
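    # Illustrative usage (an addition, not in the original file):
    prefix_sum = PrefixSum([1, 2, 3, 4])
    assert prefix_sum.get_sum(1, 3) == 9  # 2 + 3 + 4
    assert prefix_sum.contains_sum(7)  # the contiguous slice [3, 4] sums to 7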
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__ : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__a = []
for i in range(lowerCAmelCase__ ):
__a = i / num_diffusion_timesteps
__a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
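
# Minimal denoising-loop sketch (an addition; `model` is an assumed callable
# returning a noise prediction for a latent `x` at timestep `t`):
#
#     scheduler = KDPM2DiscreteScheduler()
#     scheduler.set_timesteps(25)
#     x = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         x_in = scheduler.scale_model_input(x, t)
#         noise_pred = model(x_in, t)
#         x = scheduler.step(noise_pred, t, x).prev_sample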
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def counting_sort(collection):
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
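
# Minimal usage sketch (an addition; the archive path is hypothetical):
#
#     fmt = Extractor.infer_extractor_format("/tmp/archive.tar.gz")
#     if fmt:
#         Extractor.extract("/tmp/archive.tar.gz", "/tmp/extracted", fmt)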
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Tuple = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 368 |
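# --- Added usage sketch (not part of the original file; assumes an installed
# `transformers` package). With the lazy module registered above, the heavy
# submodules are only imported on first attribute access:
from transformers import DistilBertConfig
config = DistilBertConfig()  # first access triggers the real import of configuration_distilbert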
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 215 | 0 |
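# --- Added usage sketch (not part of the original file): preprocessing one image
# with the processor above; `image.png` is a placeholder path and Pillow is assumed.
from PIL import Image

processor = CLIPImageProcessor()
batch = processor(images=Image.open("image.png"), return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the default size/crop settings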
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [-4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 56 |
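# --- Added usage sketch (not part of the original file): the encode/decode round
# trip these tests exercise, outside of unittest; assumes `diffusers` is installed.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
with torch.no_grad():
    posterior = vae.encode(torch.randn(1, 3, 512, 512)).latent_dist
    latents = posterior.sample(generator=torch.manual_seed(0))  # (1, 4, 64, 64)
    reconstruction = vae.decode(latents).sample  # (1, 3, 512, 512)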
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        """simple docstring"""
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """simple docstring"""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """simple docstring"""
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
| 303 | 0 |
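# --- Added usage sketch (not part of the original file): driving the
# class-conditional pipeline above; the `facebook/DiT-XL-2-256` checkpoint and the
# CUDA device are assumptions.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
class_ids = pipe.get_label_ids(["white shark"])
image = pipe(class_labels=class_ids, num_inference_steps=25, generator=torch.manual_seed(33)).images[0]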
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 363 |
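# --- Added usage sketch (not part of the original file): two of the helpers above
# applied to a concrete tensor.
import tensorflow as tf

x = tf.zeros((2, 5, 128))
print(shape_list(x))  # [2, 5, 128] -- static dims come back as plain ints
print(flatten(x, start_dim=1).shape)  # (2, 640), mirroring torch.flatten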
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 238 | 0 |
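# --- Added usage sketch (not part of the original file; assumes `transformers` is
# installed and the Hub is reachable):
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
print(tok("Hello world")["input_ids"])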
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """simple docstring"""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """simple docstring"""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """simple docstring"""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 325 |
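# --- Added usage sketch (not part of the original file): invoking a PipelineTool
# such as the one above; `document.png` is a placeholder path and the first call
# downloads the Donut checkpoint.
from PIL import Image

tool = DocumentQuestionAnsweringTool()
answer = tool(Image.open("document.png"), "What is the invoice total?")
print(answer)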
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 325 | 1 |
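# --- Added usage sketch (not part of the original file): typical shell invocation of
# the conversion script above; the script filename and output path are placeholders.
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny-converted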
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        """simple docstring"""
        super().__init__()
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """simple docstring"""
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """simple docstring"""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """simple docstring"""
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 51 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 51 | 1 |
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try out the different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
a__: Optional[int] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
a__: List[str] = [0, 0]
# all coordinates are given in format [y,x]
a__: Tuple = [len(grid) - 1, len(grid[0]) - 1]
a__: Tuple = 1
# the cost map which pushes the path closer to the goal
a__: Dict = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
a__: Dict = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
a__: List[str] = 99
a__ , a__: Optional[int] = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 193 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times, adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
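# For intuition, the expansion is exactly `itertools.product` over the dimensions.
# A minimal sketch of what the script computes internally (illustrative only):
#
#    import itertools
#    dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#    variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#    # -> ['--tf32 0 --fp16 0', '--tf32 0 --fp16 1', ..., '--tf32 1 --bf16 1']
#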
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline (100%) and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
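# Note: each run's metrics are read back from `output_dir/all_results.json`, which
# the HF Trainer writes as a flat JSON dict of metrics; a made-up example of its shape:
#
#    {"train_samples_per_second": 342.09, "train_loss": 2.51, "train_runtime": 58.4}
#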
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    # Duplicates everything written to stdout into a log file as well.
    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 11 | 0 |
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="attention" ):
'''simple docstring'''
UpperCAmelCase = params[F'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
UpperCAmelCase = params[F'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
UpperCAmelCase = params[F'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
UpperCAmelCase = params[F'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
'''simple docstring'''
if split_mlp_wi:
UpperCAmelCase = params[F'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
UpperCAmelCase = params[F'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
UpperCAmelCase = (wi_a, wi_a)
else:
UpperCAmelCase = params[F'''{prefix}/layers_{i}/mlp/wi/kernel''']
UpperCAmelCase = params[F'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
return params[F'''{prefix}/layers_{i}/{layer_name}/scale''']
def _lowerCAmelCase ( lowerCAmelCase , *, lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = traverse_util.flatten_dict(variables["""target"""] )
UpperCAmelCase = {"""/""".join(lowerCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowerCAmelCase )
UpperCAmelCase = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """encoder""" , """pre_attention_layer_norm""" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = tax_attention_lookup(lowerCAmelCase , lowerCAmelCase , """encoder""" , """attention""" )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """encoder""" , """pre_mlp_layer_norm""" )
UpperCAmelCase , UpperCAmelCase = tax_mlp_lookup(lowerCAmelCase , lowerCAmelCase , """encoder""" , lowerCAmelCase )
UpperCAmelCase = layer_norm
if split_mlp_wi:
UpperCAmelCase = wi[0].T
UpperCAmelCase = wi[1].T
else:
UpperCAmelCase = wi.T
UpperCAmelCase = wo.T
UpperCAmelCase = old[
"""encoder/relpos_bias/rel_embedding"""
].T
UpperCAmelCase = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """pre_self_attention_layer_norm""" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = tax_attention_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """self_attention""" )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """pre_cross_attention_layer_norm""" )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = tax_attention_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """encoder_decoder_attention""" )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase = tax_layer_norm_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , """pre_mlp_layer_norm""" )
UpperCAmelCase , UpperCAmelCase = tax_mlp_lookup(lowerCAmelCase , lowerCAmelCase , """decoder""" , lowerCAmelCase )
UpperCAmelCase = layer_norm
if split_mlp_wi:
UpperCAmelCase = wi[0].T
UpperCAmelCase = wi[1].T
else:
UpperCAmelCase = wi.T
UpperCAmelCase = wo.T
UpperCAmelCase = old["""decoder/decoder_norm/scale"""]
UpperCAmelCase = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase = old["""decoder/logits_dense/kernel"""].T
return new
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
UpperCAmelCase = state_dict["""shared.weight"""]
return state_dict
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = checkpoints.load_tax_checkpoint(lowerCAmelCase )
UpperCAmelCase = convert_tax_to_pytorch(lowerCAmelCase , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase )
UpperCAmelCase = make_state_dict(lowerCAmelCase , lowerCAmelCase )
model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False ):
'''simple docstring'''
UpperCAmelCase = TaConfig.from_json_file(lowerCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCAmelCase = TaEncoderModel(lowerCAmelCase )
else:
UpperCAmelCase = TaForConditionalGeneration(lowerCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase )
print("""Done""" )
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
lowerCAmelCase_ : Tuple = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 248 |
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def list_truncated_nums(n: int) -> list[int]:
    """Returns every left- and right-truncation of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
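
# For example, list_truncated_nums(3797) yields
# [3797, 797, 379, 97, 37, 7, 3] - the number plus all of its truncations.
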
def validate(n: int) -> bool:
    """Quick pre-filter: the leading and trailing three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` truncatable primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Returns the sum of the first eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
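
# Sanity check: solution() should return 748317, the known answer to
# Project Euler problem 37 (the sum of the eleven truncatable primes).
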
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(1_1)) = }')
| 248 | 1 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(1_0_0, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
| 45 |
"""
Project Euler Problem 39: https://projecteuler.net/problem=39

If p is the perimeter of a right-angle triangle with integral length sides
{a, b, c}, for which value of p <= 1000 is the number of solutions maximised?
"""
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """
    Returns a Counter keyed by the perimeter of a right-angled triangle,
    with the number of corresponding integer triplets as the value.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution(max_perimeter: int = 1000) -> int:
    """Returns the perimeter with the maximum number of solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
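
# For max_perimeter = 1000 this returns 840, the known answer to Project Euler problem 39.
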
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 215 | 0 |
"""simple docstring"""
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
__lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
__lowerCAmelCase = '''image_segmenter'''
__lowerCAmelCase = CLIPSegForImageSegmentation
__lowerCAmelCase = ['''image''', '''text''']
__lowerCAmelCase = ['''image''']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''vision'''] )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
return self.pre_processor(text=[label] , images=[image] , padding=_UpperCAmelCase , return_tensors='''pt''' )
def _lowerCamelCase ( self , _UpperCAmelCase ):
with torch.no_grad():
__a : List[str] = self.model(**_UpperCAmelCase ).logits
return logits
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : str = outputs.cpu().detach().numpy()
__a : int = 0
__a : Optional[int] = 1
return Image.fromarray((array * 255).astype(np.uinta ) ) | 355 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 188 | 0 |
"""simple docstring"""
import pprint
import requests
lowercase__ : Any = "https://zenquotes.io/api"
def UpperCamelCase_ ( ) -> str:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def UpperCamelCase_ ( ) -> List[str]:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
lowercase__ : List[str] = random_quotes()
pprint.pprint(response)
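
# Note: zenquotes responses are JSON lists of quote objects; at the time of writing
# each entry looks roughly like {"q": <quote>, "a": <author>, "h": <html>} - these
# field names come from the public API and should be verified before relying on them.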
| 224 |
"""simple docstring"""
from __future__ import annotations
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
if b == 0:
return (1, 0)
((lowerCamelCase__) , (lowerCamelCase__)) : Any =extended_euclid(__lowerCamelCase , a % b )
lowerCamelCase__ : Optional[Any] =a // b
return (y, x - k * y)
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
((lowerCamelCase__) , (lowerCamelCase__)) : Any =extended_euclid(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : List[Any] =na * na
lowerCamelCase__ : Union[str, Any] =ra * x * na + ra * y * na
return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """Returns the multiplicative inverse of a modulo n (a and n must be coprime)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same problem solved with modular inverses instead of raw extended Euclid."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
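
# A quick sanity check (values chosen here for illustration): the system
# x = 1 (mod 5), x = 3 (mod 7) has the unique solution 31 in [0, 35), and
# both implementations agree:
#
#   chinese_remainder_theorem(5, 1, 7, 3)   # -> 31
#   chinese_remainder_theorem2(5, 1, 7, 3)  # -> 31
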
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 238 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support SD v1 configs
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
__snake_case : Optional[int] =argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
__snake_case : List[str] =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
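
# Example invocation (script and file names here are placeholders, not from the original):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers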
| 94 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self ) -> None:
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] ,)
@cached_property
    def big_tokenizer(self ) -> XLMProphetNetTokenizer:
        return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
    def test_tokenization_base_easy_symbols(self ) -> None:
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [3_53_89, 66_72, 49, 2]
        self.assertListEqual(original_tokenizer_encodings ,self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration(self ) -> None:
        # fmt: off
lowerCAmelCase__ : Any = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCAmelCase__ ,model_name='''microsoft/xprophetnet-large-wiki100-cased''' ,revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' ,)
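    # Illustrative sketch (editor's addition; the real offset comes from
    # `tokenizer.fairseq_offset`): the offset additions in test_full_tokenizer exist
    # because the HF tokenizer reserves the first ids for its special tokens,
    # shifting every raw SentencePiece piece id by a constant:
    #
    #   raw_sp_ids = [2_85, 46, 10, 1_70, 3_82]                 # ids straight from SentencePiece
    #   hf_ids = [i + tokenizer.fairseq_offset for i in raw_sp_ids]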
| 94 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args ) -> BaseTransformersCLICommand:
    """Factory used to instantiate the download command from CLI arguments."""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser : ArgumentParser):
        """Register this command's arguments on the given parser."""
        download_parser = parser.add_parser('''download''')
        download_parser.add_argument(
            '''--cache-dir''' , type=str , default=None , help='''Path to location to store the models''')
        download_parser.add_argument(
            '''--force''' , action='''store_true''' , help='''Force the model to be downloaded even if already in cache-dir''')
        download_parser.add_argument(
            '''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
        download_parser.add_argument('''model''' , type=str , help='''Name of the model to download''')
        download_parser.set_defaults(func=download_command_factory)
    def __init__( self , model : str , cache : str , force : bool , trust_remote_code : bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run( self ):
        """Download the model and tokenizer into the local cache."""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code)
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code)
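# Hedged usage sketch (model name is a placeholder): the command above is reached
# through the `transformers-cli` entry point,
#
#   transformers-cli download --cache-dir ./hf-cache --force bert-base-uncased
#
# which is roughly equivalent to driving the class directly:
#
#   cmd = DownloadCommand('bert-base-uncased', './hf-cache', force=True, trust_remote_code=False)
#   cmd.run()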
| 51 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass( self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m , torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 , 2 , 3 , 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def test_control_guidance_switch( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 1_0.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''controlnet_conditioning_scale'''] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass( self ):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny( self ):
        controlnet = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=None , controlnet=controlnet)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        prompt = '''evil space-punk bird'''
        control_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''').resize((512, 512))
        image = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''').resize((512, 512))
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''')
        assert np.abs(expected_image - image).max() < 9e-2
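# Hedged end-to-end sketch mirroring the slow test above (checkpoints are the same
# public ones the test uses; `init_image` and `canny_image` are placeholders the
# caller must provide):
#
#   controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny')
#   pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#       'runwayml/stable-diffusion-v1-5', controlnet=controlnet, safety_checker=None)
#   pipe.enable_model_cpu_offload()
#   result = pipe('evil space-punk bird', init_image, control_image=canny_image,
#                 num_inference_steps=50, strength=0.6, output_type='np')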
| 51 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester( unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock ) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
            # Check that the fake head request was actually made
            mock_head.assert_called()

    def test_legacy_load_from_url( self ):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class FeatureExtractorPushToHubTester( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )

    @classmethod
    def tearDownClass( cls ):
        try:
            delete_repo(token=cls._token , repo_id="test-feature-extractor" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
        except HTTPError:
            pass
    def test_push_to_hub( self ):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR )
        feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id="test-feature-extractor" , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' )
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_in_organization( self ):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR )
        feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=True , use_auth_token=self._token )
            new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
            for k, v in feature_extractor.__dict__.items():
                self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_dynamic_feature_extractor( self ):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR )
        feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
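    # Minimal sketch of the round trip these tests exercise (repo id and token are
    # placeholders; pushing requires write access to the Hub):
    #
    #   fe = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_DIR)
    #   fe.push_to_hub('my-feature-extractor', use_auth_token='hf_...')
    #   fe_again = Wav2Vec2FeatureExtractor.from_pretrained('<username>/my-feature-extractor')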
| 357 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester( unittest.TestCase ):
    def get_tokenizer( self , mname ):
        return FSMTTokenizer.from_pretrained(mname )

    def get_model( self , mname ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["bleu"] , min_bleu_score )
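    # Hedged sketch of the metric call (editor's addition; sentences are made up):
    # `calculate_bleu` takes parallel lists of hypotheses and references and returns
    # a dict carrying a corpus-level score under the "bleu" key.
    #
    #   hypotheses = ["the cat sat on the mat"]
    #   references = ["the cat is on the mat"]
    #   calculate_bleu(hypotheses, references)  # -> {'bleu': <score>}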
| 202 | 0 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float , area: float , distance: float) -> dict[str, float]:
    """
    Solve F = (ℏ c π² A) / (240 d⁴) for whichever of force, area or distance is
    passed in as 0; exactly one of the three must be 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("""One and only one argument must be 0""")
    if force < 0:
        raise ValueError("""Magnitude of force can not be negative""")
    if distance < 0:
        raise ValueError("""Distance can not be negative""")
    if area < 0:
        raise ValueError("""Area can not be negative""")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_4_0 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (2_4_0 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("""One and only one argument must be 0""")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
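    # Hedged worked example (illustrative numbers added for this document, not part
    # of the original module): two plates of area 4e-4 m^2 separated by 1e-6 m
    # attract with roughly 5.2e-7 N.
    print(casimir_force(force=0 , area=4e-4 , distance=1e-6))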
| 248 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor ):
    r"""Constructs a BLIP-style image processor."""
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 384, """width""": 384}
        size = get_size_dict(size , default_to_square=True )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=True )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        output_size = (size["""height"""], size["""width"""])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , do_convert_rgb = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=True )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        encoded_outputs = BatchFeature(data={"""pixel_values""": images} , tensor_type=return_tensors )
        return encoded_outputs
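# Hedged usage sketch (synthetic image; the default size comes from the class above):
#
#   import numpy as np
#   from PIL import Image
#   processor = BlipImageProcessor()
#   img = Image.fromarray(np.zeros((100, 100, 3), dtype=np.uint8))
#   batch = processor(images=img, return_tensors='np')
#   batch['pixel_values'].shape  # (1, 3, 384, 384) with the default 384x384 size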
| 248 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification( TaskTemplate ):
    task: str = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"image": Image()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features( self , features ) -> "ImageClassification":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column] , ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
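# Hedged usage sketch (synthetic features): aligning the template with a dataset's
# features swaps the generic ClassLabel class for the dataset's own label feature.
#
#   from datasets import ClassLabel, Features, Image
#   features = Features({'image': Image(), 'labels': ClassLabel(names=['cat', 'dog'])})
#   aligned = ImageClassification().align_with_features(features)
#   aligned.label_schema['labels'].names  # ['cat', 'dog']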
| 344 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args ):
    '''Factory used to instantiate the training command from CLI arguments.'''
    return TrainCommand(args )
class TrainCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser :ArgumentParser) -> None:
        train_parser = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
        train_parser.add_argument(
            '''--train_data''' , type=str , required=True , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
        train_parser.add_argument(
            '''--column_label''' , type=int , default=0 , help='''Column of the dataset csv file with example labels.''')
        train_parser.add_argument(
            '''--column_text''' , type=int , default=1 , help='''Column of the dataset csv file with example texts.''')
        train_parser.add_argument(
            '''--column_id''' , type=int , default=2 , help='''Column of the dataset csv file with example ids.''')
        train_parser.add_argument(
            '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
        train_parser.add_argument('''--validation_data''' , type=str , default='''''' , help='''path to validation dataset.''')
        train_parser.add_argument(
            '''--validation_split''' , type=float , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
        train_parser.add_argument('''--output''' , type=str , default='''./''' , help='''path to save the trained model.''')
        train_parser.add_argument(
            '''--task''' , type=str , default='''text_classification''' , help='''Task to train the model on.''')
        train_parser.add_argument(
            '''--model''' , type=str , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
        train_parser.add_argument('''--train_batch_size''' , type=int , default=32 , help='''Batch size for training.''')
        train_parser.add_argument('''--valid_batch_size''' , type=int , default=64 , help='''Batch size for validation.''')
        train_parser.add_argument('''--learning_rate''' , type=float , default=3E-5 , help='''Learning rate.''')
        train_parser.add_argument('''--adam_epsilon''' , type=float , default=1E-08 , help='''Epsilon for Adam optimizer.''')
        train_parser.set_defaults(func=train_command_factory)
    def __init__( self , args :Namespace) -> None:
        self.logger = logging.get_logger('''transformers-cli/training''')
        self.framework = '''tf''' if is_tf_available() else '''torch'''
        os.makedirs(args.output , exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ) -> None:
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch( self ) -> None:
        raise NotImplementedError

    def run_tf( self ) -> None:
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
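    # Hedged CLI sketch (file paths are placeholders): the command registered above
    # is reached through the `transformers-cli` entry point, e.g.
    #
    #   transformers-cli train \
    #       --train_data ./train.csv \
    #       --validation_data ./valid.csv \
    #       --model bert-base-uncased \
    #       --task text_classification \
    #       --output ./trained-pipeline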
| 344 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """的""",
            """价""",
            """格""",
            """是""",
            """15""",
            """便""",
            """alex""",
            """##andra""",
            """,""",
            """。""",
            """-""",
            """t""",
            """shirt""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
            """do_resize""": True,
            """size""": {"""height""": 224, """width""": 224},
            """do_center_crop""": True,
            """crop_size""": {"""height""": 18, """width""": 18},
            """do_normalize""": True,
            """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
            """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
            """do_convert_rgb""": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor( self , **kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        """Create a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
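    # Hedged usage sketch summarising what the tests assert (tokenizer, image
    # processor and image are built the same way the fixtures above build them):
    #
    #   processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
    #   batch = processor(text='一只猫', images=image, return_tensors='np')
    #   # batch carries input_ids / token_type_ids / attention_mask from the
    #   # tokenizer plus pixel_values from the image processor.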
| 82 |
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Union[str, Any] , **_A : Tuple ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : List[str] ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Dict , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : List[str] , **_A : str ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
def UpperCAmelCase__ ( *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(_A , ['''torch'''] )
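# Behavioural sketch (editor's addition; `SomeTorchOnlyClass` is a hypothetical
# stand-in for any stub in this module): the dummies keep `import` statements
# working when torch is absent and only fail at the point of use.
#
#   try:
#       obj = SomeTorchOnlyClass()
#   except ImportError as err:
#       print(err)  # message produced by `requires_backends`, pointing at torch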
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : str = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Any = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class __magic_name__ ( metaclass=lowerCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch']
def __init__( self, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def _UpperCAmelCase ( cls, *lowercase_, **lowercase_ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
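# All of the classes above follow the same placeholder pattern. A minimal,
# hand-written sketch of one such dummy (the class name below is made up;
# DummyObject/requires_backends are the HuggingFace utility names used above):
#
#   from transformers.utils import DummyObject, requires_backends
#
#   class MyTorchOnlyModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           # raises an informative ImportError when torch is not installed
#           requires_backends(self, ["torch"])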
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    """Build the parser for the `accelerate config` command and its subcommands."""
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser
def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
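# Usage sketch (subcommand names inferred from the parsers registered above;
# the exact CLI flags belong to the respective subparsers):
#
#   accelerate config            # run the interactive configuration questionnaire
#   accelerate config default    # write a default config file
#   accelerate config update     # migrate an existing config file in place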
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = f'''input_blocks.{3*i + j + 1}.0.'''
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = f'''input_blocks.{3*i + j + 1}.1.'''
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = f'''output_blocks.{3*i + j}.0.'''
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = f'''output_blocks.{3*i + j}.1.'''
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = f'''input_blocks.{3*(i+1)}.0.op.'''
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = f'''middle_block.{2*j}.'''
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename a HF Diffusers UNet state dict to CompVis/stable-diffusion naming."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
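# A tiny, self-contained sanity check of the renaming direction (toy tensors,
# not a real UNet; it is defined but never invoked by the conversion script):
def _demo_convert_unet_naming() -> None:
    toy_hf_state = {hf_name: torch.zeros(1) for _, hf_name in unet_conversion_map}
    converted = convert_unet_state_dict(toy_hf_state)
    # HF Diffusers names come back under their CompVis/stable-diffusion names
    assert "time_embed.0.weight" in converted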
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = f'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = f'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = f'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = f'''mid.block_{i+1}.'''
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    """Convert HF linear attention weights to SD conv weights by adding two trailing unit dims."""
    return w.reshape(*w.shape, 1, 1)
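# Quick illustration (toy size): Diffusers stores the VAE attention projections
# as 2-D linear weights, while SD checkpoints expect 1x1 conv kernels.
def _demo_reshape_weight_for_sd() -> None:
    w = torch.randn(8, 8)
    assert reshape_weight_for_sd(w).shape == (8, 8, 1, 1)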
def convert_vae_state_dict(vae_state_dict):
    """Rename a HF Diffusers VAE state dict to stable-diffusion naming, reshaping attention weights."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F"""mid.attn_1.{weight_name}.weight""" in k:
                print(F"""Reshaping {k} for SD format""")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'''q''': 0, '''k''': 1, '''v''': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert a HF CLIP text encoder state dict to the OpenCLIP (SD v2.x) format, fusing q/k/v projections."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # the single character 'q', 'k' or 'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
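# Toy illustration of the q/k/v fusion above: three separate [dim, dim]
# projections of one layer become a single [3*dim, dim] in_proj tensor.
def _demo_qkv_fusion() -> None:
    dim = 4
    toy = {
        f'''text_model.encoder.layers.0.self_attn.{c}_proj.weight''': torch.full((dim, dim), float(i))
        for i, c in enumerate("qkv")
    }
    fused = convert_text_enc_state_dict_v20(toy)
    assert fused['''resblocks.0.attn.in_proj_weight'''].shape == (3 * dim, dim)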
def convert_text_enc_state_dict(text_enc_dict):
    """SD v1.x text encoders need no renaming; return the state dict unchanged."""
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
    parser.add_argument(
        '''--use_safetensors''', action='''store_true''', help='''Save weights using safetensors; default is ckpt.'''
    )
    args = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
    vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
    text_enc_path = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='''cpu''')
    else:
        unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
        unet_state_dict = torch.load(unet_path, map_location='''cpu''')
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='''cpu''')
    else:
        vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
        vae_state_dict = torch.load(vae_path, map_location='''cpu''')
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='''cpu''')
    else:
        text_enc_path = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
        text_enc_dict = torch.load(text_enc_path, map_location='''cpu''')
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'''state_dict''': state_dict}
        torch.save(state_dict, args.checkpoint_path)
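# Example invocation (paths and the script file name are placeholders):
#
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./sd-checkpoint.safetensors \
#       --half --use_safetensors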
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, '''w''') as fp:
            fp.write('''\n'''.join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
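# A quick sketch of what the slow test above exercises (requires network access
# to download the checkpoint; identifiers as used in the test):
#
#   tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
#   ids = tokenizer.encode("sequence builders", add_special_tokens=False)
#   tokenizer.build_inputs_with_special_tokens(ids)  # -> [2] + ids (one special id prepended)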
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with `n` digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        f = fa + fb
        fa, fb = fb, f
        index += 1
        if len(str(f)) == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
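# Sanity check: the first Fibonacci number with two digits is F(7) = 13 and the
# first with three digits is F(12) = 144, so:
#   solution(2)  # -> 7
#   solution(3)  # -> 12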
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Copy/paste/tweak the fairseq X-MOD weights into the transformers design."""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1E-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
    print("""Our X-MOD config:""", config)
    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight)  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("""Dimensions of self-attention weights do not match.""")
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("""Dimensions of self-attention output weights do not match.""")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("""Dimensions of intermediate weights do not match.""")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("""Dimensions of feed-forward weights do not match.""")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("""Lists of language adapters do not match.""")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["""mnli"""].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["""mnli"""].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["""mnli"""].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["""mnli"""].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["""mnli"""](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'max_absolute_diff = {max_absolute_diff}')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1E-3)
    print("""Do both models output the same tensors?""", """🔥""" if success else """💩""")
    if not success:
        raise Exception("""Something went wRoNg""")
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
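# Example invocation (paths and the script file name are placeholders); the
# fairseq checkpoint directory is expected to contain sentencepiece.bpe.model,
# with data_bin/dict.txt available relative to the working directory:
#
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path ./xmod.base/model.pt \
#       --pytorch_dump_folder_path ./converted-xmod-base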
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
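# Small sketch of the helper above: ten cosine-schedule betas, all positive and
# capped at the default max_beta.
def _demo_betas_for_alpha_bar() -> None:
    betas = betas_for_alpha_bar(10)
    assert betas.shape == (10,)
    assert float(betas.min()) > 0.0
    assert float(betas.max()) <= 0.999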
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """A second-order, two-stage (KDPM2 / DPM-Solver-2 style) discrete scheduler."""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, prediction_type: str = "epsilon", timestep_spacing: str = "linspace", steps_offset: int = 0, ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor], ):
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None, ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.')
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(device).startswith("""mps"""):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
@property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True, ):
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("""prediction_type not implemented yet: sample""")
        else:
            raise ValueError(
                F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`')
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor, ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
return self.config.num_train_timesteps
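# Minimal usage sketch of the scheduler above; the zero "noise prediction" is a
# stand-in for a real UNet call, so this only exercises the stepping machinery.
def _demo_scheduler_loop() -> None:
    scheduler = KDPM2DiscreteScheduler()
    scheduler.set_timesteps(8)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for unet(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample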
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
@cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
@cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
    def test_prepare_batch(self):
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="""pt""")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
@require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="""pt""")
            # check if input_ids are returned and no labels
            self.assertIn("""input_ids""", batch)
            self.assertIn("""attention_mask""", batch)
            self.assertNotIn("""labels""", batch)
            self.assertNotIn("""decoder_attention_mask""", batch)
@require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="""max_length""", return_tensors="""pt""")
            self.assertEqual(32, targets["""input_ids"""].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["""I am a small frog""" * 10_24, """I am a small frog"""], padding=True, truncation=True, return_tensors="""pt""")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 10_24))
@require_torch
    def test_special_tokens(self):
        src_text = ["""A long paragraph for summarization."""]
        tgt_text = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="""pt""")
            input_ids = inputs["""input_ids"""]
            labels = inputs["""labels"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """A, <mask> AllenNLP sentence."""
SCREAMING_SNAKE_CASE__ : int = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while the Python tokenizer doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 25 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __magic_name__ ( __snake_case : Union[str, Any] , __snake_case : List[str]=7 ) -> str:
lowercase : int = None
if token is not None:
lowercase : Any = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowercase : int = "636036"
lowercase : Dict = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowercase : int = requests.get(__snake_case , headers=__snake_case ).json()
return result["workflow_runs"]
def __magic_name__ ( __snake_case : Dict ) -> Tuple:
lowercase : Tuple = get_daily_ci_runs(__snake_case )
lowercase : Union[str, Any] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase : List[Any] = workflow_run["id"]
break
return workflow_run_id
def __magic_name__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> int:
lowercase : Dict = get_last_daily_ci_runs(__snake_case )
if workflow_run_id is not None:
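        # NB: `worflow_run_id` (sic) below follows the parameter spelling that
        # `get_artifacts_links` itself uses in `get_ci_error_statistics.py`.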
lowercase : Dict = get_artifacts_links(worflow_run_id=__snake_case , token=__snake_case )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase : Optional[int] = artifacts_links[artifact_name]
download_artifact(
artifact_name=__snake_case , artifact_url=__snake_case , output_dir=__snake_case , token=__snake_case )
def __magic_name__ ( __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Tuple ) -> Optional[int]:
get_last_daily_ci_artifacts(__snake_case , __snake_case , __snake_case )
lowercase : str = {}
for artifact_name in artifact_names:
lowercase : Optional[Any] = os.path.join(__snake_case , f"""{artifact_name}.zip""" )
if os.path.isfile(__snake_case ):
lowercase : List[Any] = {}
with zipfile.ZipFile(__snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(__snake_case ):
# read the file
with z.open(__snake_case ) as f:
lowercase : str = f.read().decode("UTF-8" )
return results
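# Hedged driver sketch: the de-obfuscated names are inferred from the call
# sites above (this module's last function is assumed to be
# `get_last_daily_ci_reports`, taking artifact names, an output directory,
# and a GitHub token, and returning {artifact: {filename: content}}).
def _sketch_fetch_previous_ci():
    import os

    token = os.environ.get("GITHUB_TOKEN")
    reports = get_last_daily_ci_reports(["ci_results"], "previous_ci_artifacts", token)
    for artifact, files in reports.items():
        print(artifact, "->", sorted(files))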
| 202 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
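# Every placeholder class below shadows a torch-backed public name of the
# library: constructing one, or calling either classmethod (typically
# `from_config` / `from_pretrained` in the un-obfuscated source), raises via
# `requires_backends` to tell the user the "torch" extra is missing. The
# repetitive bodies are intentional boilerplate, one stub per public symbol.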
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Tuple ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : Any ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : int ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : int , *lowercase_ : Any , **lowercase_ : Optional[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : List[Any] , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : str , **lowercase_ : Optional[int] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : List[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : List[str] , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : int , **lowercase_ : Dict ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : List[str] , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : int , **lowercase_ : Any ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Dict , *lowercase_ : Tuple , **lowercase_ : Dict ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : Optional[int] , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : List[Any] , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Optional[int] , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : Any , **lowercase_ : Dict ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Any , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Any , *lowercase_ : Any , **lowercase_ : List[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Any , **lowercase_ : str ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : Union[str, Any] , **lowercase_ : int ):
requires_backends(cls , ["""torch"""] )
def lowerCamelCase ( *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[Any] ) -> Optional[Any]:
requires_backends(UpperCAmelCase__ , ["""torch"""] )
def lowerCamelCase ( *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Union[str, Any] ) -> str:
requires_backends(UpperCAmelCase__ , ["""torch"""] )
def lowerCamelCase ( *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Any ) -> Optional[Any]:
requires_backends(UpperCAmelCase__ , ["""torch"""] )
def lowerCamelCase ( *UpperCAmelCase__ : str , **UpperCAmelCase__ : Optional[int] ) -> str:
requires_backends(UpperCAmelCase__ , ["""torch"""] )
def lowerCamelCase ( *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> Optional[int]:
requires_backends(UpperCAmelCase__ , ["""torch"""] )
def lowerCamelCase ( *UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[int] ) -> List[str]:
requires_backends(UpperCAmelCase__ , ["""torch"""] )
def lowerCamelCase ( *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Optional[Any] ) -> int:
requires_backends(UpperCAmelCase__ , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Any , *lowercase_ : List[Any] , **lowercase_ : Any ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : str , **lowercase_ : List[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : str , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Dict , *lowercase_ : Any , **lowercase_ : Dict ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Any , **lowercase_ : int ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : str , **lowercase_ : str ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Any , **lowercase_ : List[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : int , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : str , *lowercase_ : Dict , **lowercase_ : int ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : List[Any] , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Tuple , *lowercase_ : int , **lowercase_ : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Any , *lowercase_ : List[Any] , **lowercase_ : Optional[int] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : int , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : Tuple ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Any ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : str , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : int , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : Dict ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Tuple , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Dict , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : int , *lowercase_ : int , **lowercase_ : Tuple ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Tuple , **lowercase_ : int ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : str , *lowercase_ : List[str] , **lowercase_ : Dict ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : int , **lowercase_ : Any ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Dict , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Optional[Any] , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Optional[int] , **lowercase_ : Any ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : int ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : List[Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : int , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : str ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Dict , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : List[str] , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : str , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Tuple , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Tuple , *lowercase_ : List[str] , **lowercase_ : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : str , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : str , **lowercase_ : str ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Any , *lowercase_ : Tuple , **lowercase_ : List[str] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : List[Any] , **lowercase_ : int ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : List[Any] , **lowercase_ : Any ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : List[Any] , *lowercase_ : List[Any] , **lowercase_ : Tuple ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : str ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Tuple , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Any , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Tuple , **lowercase_ : int ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : Tuple , **lowercase_ : List[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : int , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : List[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : List[Any] , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Dict , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : Union[str, Any] , **lowercase_ : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Optional[int] , **lowercase_ : int ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : str , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Tuple , **lowercase_ : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Tuple , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , *lowercase_ : List[Any] , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Any , *lowercase_ : List[Any] , **lowercase_ : Any ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : int ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Optional[int] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Optional[Any] , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , *lowercase_ : Dict , **lowercase_ : List[Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : str , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : int , **lowercase_ : str ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Dict , *lowercase_ : Any , **lowercase_ : Optional[int] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Any ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : List[str] , **lowercase_ : Dict ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Optional[Any] , *lowercase_ : List[Any] , **lowercase_ : Any ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : str , **lowercase_ : str ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : Any ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : str ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Any , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : int ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Union[str, Any] , *lowercase_ : List[str] , **lowercase_ : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , *lowercase_ : str , **lowercase_ : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Tuple , *lowercase_ : Tuple , **lowercase_ : List[str] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : str , **lowercase_ : int ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Dict , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : Optional[int] ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , *lowercase_ : List[str] , **lowercase_ : Tuple ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Any , *lowercase_ : List[Any] , **lowercase_ : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : str , **lowercase_ : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , *lowercase_ : int , **lowercase_ : List[str] ):
requires_backends(cls , ["""torch"""] )
class __magic_name__ ( metaclass=_UpperCAmelCase):
UpperCamelCase__ = ['''torch''']
def __init__( self : Dict , *lowercase_ : List[Any] , **lowercase_ : Any ):
requires_backends(self , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *lowercase_ : Tuple , **lowercase_ : List[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , *lowercase_ : Any , **lowercase_ : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
| 21 | 
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def lowerCamelCase ( UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> float:
lowercase_ : List[Any] = x
lowercase_ : Any = y
for step in range(UpperCAmelCase__ ): # noqa: B007
lowercase_ : Dict = a * a - b * b + x
lowercase_ : str = 2 * a * b + y
lowercase_ : Optional[Any] = a_new
        # divergence is guaranteed once the squared magnitude a*a + b*b
        # exceeds 4, i.e. once |z| > 2
if a * a + b * b > 4:
break
return step / (max_step - 1)
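# Sanity check of the escape-time normalization (hypothetical calls written
# against the original name `get_distance`, still visible at the call site
# further below):
#   get_distance(0.0, 0.0, 50) == 1.0   # the origin never escapes
#   get_distance(3.0, 0.0, 50) == 0.0   # escapes on the very first step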
def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowerCamelCase ( UpperCAmelCase__ : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase__ , 1 , 1 ) )
def lowerCamelCase ( UpperCAmelCase__ : int = 800 , UpperCAmelCase__ : int = 600 , UpperCAmelCase__ : float = -0.6 , UpperCAmelCase__ : float = 0 , UpperCAmelCase__ : float = 3.2 , UpperCAmelCase__ : int = 50 , UpperCAmelCase__ : bool = True , ) -> Image.Image:
lowercase_ : Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) )
lowercase_ : Tuple = img.load()
# loop through the image-coordinates
for image_x in range(UpperCAmelCase__ ):
for image_y in range(UpperCAmelCase__ ):
# determine the figure-coordinates based on the image-coordinates
lowercase_ : Any = figure_width / image_width * image_height
lowercase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowercase_ : Union[str, Any] = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowercase_ : str = get_distance(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowercase_ : List[Any] = get_color_coded_rgb(UpperCAmelCase__ )
else:
lowercase_ : Dict = get_black_and_white_rgb(UpperCAmelCase__ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_lowercase : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 21 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]:
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = num_channels
A_ : Tuple = embeddings_size
A_ : str = hidden_sizes
A_ : Optional[Any] = depths
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_act
A_ : Optional[Any] = num_labels
A_ : str = scope
A_ : Optional[int] = len(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Dict = TFRegNetModel(config=_lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : Optional[Any] = self.num_labels
A_ : int = TFRegNetForImageClassification(_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
A_ : Any = self.prepare_config_and_inputs()
A_ , A_ , A_ : str = config_and_inputs
A_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = TFRegNetModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase_ ( self ) -> int:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) , training=_lowerCamelCase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Dict = layer_type
A_ : List[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowerCamelCase , _lowerCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
def UpperCAmelCase_ ( self ) -> str:
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = TFRegNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : Tuple = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""tf""" )
# forward pass
A_ : List[Any] = model(**_lowerCamelCase , training=_lowerCamelCase )
# verify the logits
A_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
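# Hedged end-to-end sketch mirroring the integration test above. The
# checkpoint name is an assumption (the test takes it from the archive list,
# which is believed to start with "facebook/regnet-y-040").
def _sketch_regnet_inference():
    from PIL import Image
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet class logits
    return int(tf.math.argmax(logits, axis=-1)[0])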
| 344 |
'''simple docstring'''
def UpperCAmelCase ( a_ = 1_0_0 ) -> int:
"""simple docstring"""
A_ : Dict = n * (n + 1) * (2 * n + 1) / 6
A_ : Optional[int] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'{solution() = }')
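# Worked check of the two closed forms used above: for n = 10,
#   sum of squares = 10 * 11 * 21 / 6   = 385
#   square of sum  = (10 * 11 / 2) ** 2 = 3025
# giving 3025 - 385 = 2640; for the default n = 100 the result is 25164150.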
| 344 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , A__ ):
A__ : str = data
def __iter__( self ):
for element in self.data:
yield element
def UpperCamelCase (lowercase_: Any=True ) -> Dict:
A__ : Any = Accelerator(even_batches=lowercase_ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def UpperCamelCase (lowercase_: Accelerator , lowercase_: int , lowercase_: int , lowercase_: bool = False ) -> List[str]:
if iterable:
A__ : Union[str, Any] = DummyIterableDataset(torch.as_tensor(range(lowercase_ ) ) )
else:
A__ : int = TensorDataset(torch.as_tensor(range(lowercase_ ) ) )
A__ : Dict = DataLoader(lowercase_ , batch_size=lowercase_ )
A__ : int = accelerator.prepare(lowercase_ )
return dl
def UpperCamelCase (lowercase_: Accelerator , lowercase_: int , lowercase_: int , lowercase_: List[int] , lowercase_: List[int] , ) -> List[str]:
A__ : str = create_dataloader(accelerator=lowercase_ , dataset_size=lowercase_ , batch_size=lowercase_ )
A__ : List[str] = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def UpperCamelCase () -> Tuple:
A__ : Any = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
lowercase_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
lowercase_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def UpperCamelCase () -> List[str]:
A__ : str = create_accelerator(even_batches=lowercase_ )
verify_dataloader_batch_sizes(
lowercase_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
lowercase_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
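# Illustration of the two modes exercised above (assumed sharding behaviour):
# 3 samples across 2 processes with batch_size=1 ->
#   even_batches=True : process 0 sees [0, 2], process 1 sees [1, 0]
#                       (sample 0 is re-used so both get two batches)
#   even_batches=False: process 0 sees [0, 2], process 1 sees [1] only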
def UpperCamelCase () -> int:
A__ : str = create_accelerator(even_batches=lowercase_ )
A__ : List[Any] = torch.nn.Linear(1 , 1 )
A__ : Optional[Any] = accelerator.prepare(lowercase_ )
A__ : List[str] = create_dataloader(lowercase_ , dataset_size=3 , batch_size=1 )
A__ : List[Any] = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(lowercase_ ):
A__ : List[Any] = ddp_model(batch[0].float() )
A__ : int = output.sum()
loss.backward()
batch_idxs.append(lowercase_ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def UpperCamelCase (lowercase_: int ) -> Union[str, Any]:
with warnings.catch_warnings(record=lowercase_ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , lowercase_ )
assert "only supported for multi-GPU" in str(w[-1].message )
def UpperCamelCase () -> str:
A__ : Union[str, Any] = True
A__ : Union[str, Any] = False
A__ : List[Any] = create_accelerator(even_batches=lowercase_ )
A__ : Tuple = torch.nn.Linear(1 , 1 )
A__ : Tuple = accelerator.prepare(lowercase_ )
A__ : List[str] = create_dataloader(lowercase_ , dataset_size=3 , batch_size=1 )
A__ : int = create_dataloader(lowercase_ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowercase_ ):
A__ : Union[str, Any] = train_dl.batch_sampler.even_batches
A__ : Any = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def UpperCamelCase () -> Tuple:
A__ : Optional[Any] = True
A__ : str = False
A__ : str = create_accelerator(even_batches=lowercase_ )
A__ : str = torch.nn.Linear(1 , 1 )
A__ : List[Any] = accelerator.prepare(lowercase_ )
create_dataloader(lowercase_ , dataset_size=3 , batch_size=1 , iterable=lowercase_ )
A__ : Any = create_dataloader(lowercase_ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowercase_ ):
A__ : List[Any] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def UpperCamelCase () -> Union[str, Any]:
A__ : Tuple = create_accelerator()
A__ : Tuple = torch.nn.Linear(1 , 1 )
A__ : Optional[Any] = accelerator.prepare(lowercase_ )
create_dataloader(lowercase_ , dataset_size=3 , batch_size=1 , iterable=lowercase_ )
with warnings.catch_warnings(record=lowercase_ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowercase_ ):
pass
assert issubclass(w[-1].category , lowercase_ )
assert "only supported for map-style datasets" in str(w[-1].message )
def UpperCamelCase () -> Any:
A__ : Optional[int] = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
A__ : Dict = accelerator.state.distributed_type
A__ : Tuple = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(lowercase_ )
A__ : Optional[int] = original_state
if __name__ == "__main__":
main()
| 141 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _a (unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ):
A__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=A__ ).to(A__ )
A__ : str = AutoTokenizer.from_pretrained("""google/mt5-small""" )
A__ : int = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
A__ : List[Any] = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
A__ : Union[str, Any] = model(input_ids.to(A__ ) , labels=labels.to(A__ ) ).loss
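        # `loss` is the mean cross-entropy per label token; multiplying by the
        # label length below recovers the total (negated) log-likelihood in
        # which the reference mesh-tensorflow (`mtf_score`) value is expressed.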
A__ : Union[str, Any] = -(labels.shape[-1] * loss.item())
A__ : Any = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 141 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A: Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase : int = ['''input_features''']
def __init__( self , _SCREAMING_SNAKE_CASE=80 , _SCREAMING_SNAKE_CASE=16000 , _SCREAMING_SNAKE_CASE=160 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , padding_value=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
UpperCAmelCase : Optional[int] = n_fft
UpperCAmelCase : Optional[Any] = hop_length
UpperCAmelCase : Any = chunk_length
UpperCAmelCase : int = chunk_length * sampling_rate
UpperCAmelCase : Optional[Any] = self.n_samples // hop_length
UpperCAmelCase : int = sampling_rate
UpperCAmelCase : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase_ , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=lowerCAmelCase_ , norm="""slaney""" , mel_scale="""slaney""" , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
UpperCAmelCase : str = spectrogram(
lowerCAmelCase_ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
UpperCAmelCase : Union[str, Any] = log_spec[:, :-1]
UpperCAmelCase : Union[str, Any] = np.maximum(lowerCAmelCase_ , log_spec.max() - 8.0 )
UpperCAmelCase : Any = (log_spec + 4.0) / 4.0
return log_spec
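        # Shape sketch with the defaults above: 30 s of 16 kHz audio is
        # 480_000 samples; with hop_length=160 (and the final frame dropped)
        # `log_spec` comes out as an (80, 3000) float array, rescaled so most
        # values fall roughly in [-1, 1].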
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0.0 ) -> Optional[Any]:
'''simple docstring'''
if attention_mask is not None:
UpperCAmelCase : int = np.array(lowerCAmelCase_ , np.intaa )
UpperCAmelCase : Any = []
for vector, length in zip(lowerCAmelCase_ , attention_mask.sum(-1 ) ):
UpperCAmelCase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
UpperCAmelCase : Tuple = padding_value
normed_input_values.append(lowerCAmelCase_ )
else:
UpperCAmelCase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "max_length" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> List[str]:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        batched_speech = BatchFeature({"input_features": raw_speech} )
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"] , axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
        if isinstance(input_features[0] , list ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["feature_extractor_type"] = self.__class__.__name__
        # the mel filterbank is recomputed on load, so it is not serialized
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
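# Usage sketch for the feature extractor above (it appears to match
# transformers' WhisperFeatureExtractor; the class definition sits above this
# fragment, so the name here is an assumption):
#   import numpy as np
#   extractor = WhisperFeatureExtractor()
#   audio = np.zeros(16000, dtype=np.float32)            # 1 s of silent mono audio
#   feats = extractor(audio, sampling_rate=16000, return_tensors="np")
#   feats["input_features"].shape                        # -> (1, 80, 3000)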
| 109 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast ):
    r"""Construct a "fast" ConvBERT tokenizer, backed by the `tokenizers` library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
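# Usage sketch (the checkpoint name is taken from the pretrained maps above):
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tok("hello world")["input_ids"]   # [CLS] ... [SEP] ids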
| 121 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result ) == sorted(expected )
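# For context, a minimal Kruskal sketch matching the call signature used above
# (num_nodes, edge list of [u, v, weight]); the real implementation lives in
# graphs.minimum_spanning_tree_kruskal, so this helper name is hypothetical:
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        # union-find root lookup with path halving
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:       # edge connects two components: keep it
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst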
| 134 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , list ) and not isinstance(text[0] , list )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , list ) and isinstance(text[0] , list ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        return self.image_processor.post_process(*args , **kwargs )

    def post_process_object_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_object_detection(*args , **kwargs )

    def post_process_image_guided_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
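# Usage sketch for the processor above (names follow the reconstruction; the
# checkpoint is the standard OWL-ViT base model):
#   from PIL import Image
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a cat", "a dog"]], images=Image.new("RGB", (768, 768)), return_tensors="np")
#   inputs.keys()   # -> input_ids, attention_mask, pixel_values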
| 134 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """Encode a single line, optionally padding it to max_length."""
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )

def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__( self ):
        return len(self.src_lens )

    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('''\n''' )
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , '''right''' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , '''right''' )
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens( data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]

    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
logger = getLogger(__name__)

def flatten_list( summary_ids ):
    """Flatten a list of lists into a single list."""
    return list(itertools.chain.from_iterable(summary_ids ) )

def save_git_info( folder_path ) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json''' ) )

def save_json( content , path , indent=4 , **json_dump_kwargs ):
    with open(path , '''w''' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )

def load_json( path ):
    with open(path ) as f:
        return json.load(f )

def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos

def lmap( f , x ):
    """list(map(f, x))"""
    return list(map(f , x ) )

def pickle_save( obj , path ):
    """pickle.dump(obj, path)"""
    with open(path , '''wb''' ) as f:
        return pickle.dump(obj , f )

def normalize_answer( s ):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text ):
        return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )

def f1_score( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa

def exact_match_score( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )

def calculate_exact_match( output_lns , reference_lns ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}

def is_rag_model( model_prefix ):
    return model_prefix.startswith('''rag''' )

def set_extra_model_params( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
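# Quick sanity checks for the QA metrics above:
#   normalize_answer("The Cat sat!")                     -> "cat sat"
#   f1_score("a cat sat", "the cat sat")                 -> 1.0  (articles stripped)
#   calculate_exact_match(["cat sat"], ["the cat sat."]) -> {"em": 1.0}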
| 142 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
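# This table mirrors transformers' dependency_versions_table.py: `deps` maps a
# bare package name to the pinned requirement string consumed by setup.py and
# the runtime version checks, e.g.:
#   assert deps["numpy"] == "numpy>=1.17"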
| 29 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__( self , do_resize=True , size=None , resample=PILImageResampling.BICUBIC , do_center_crop=True , crop_size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=None , image_std=None , do_convert_rgb=True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample=PILImageResampling.BICUBIC , data_format=None , **kwargs , ):
        """Resize an image so its shortest edge matches size["shortest_edge"]."""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format=None , **kwargs , ):
        """Center-crop an image to (size["height"], size["width"])."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format=None , **kwargs , ):
        """Rescale pixel values by scale (typically 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format=None , **kwargs , ):
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize=None , size=None , resample=None , do_center_crop=None , crop_size=None , do_rescale=None , rescale_factor=None , do_normalize=None , image_mean=None , image_std=None , do_convert_rgb=None , return_tensors=None , data_format=ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='size' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
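# Minimal usage sketch (the OPENAI_CLIP mean/std defaults above suggest this is
# the CLIP image processor, as reconstructed):
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape   # -> (1, 3, 224, 224)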
| 160 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage , current , power ) -> tuple:
    """Solve for whichever of (voltage, current, power) is given as 0, using P = V * I."""
    result = namedtuple('result' , 'name value' )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError('Only one argument must be 0' )
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system' )
    elif voltage == 0:
        return result('voltage' , power / current )
    elif current == 0:
        return result('current' , power / voltage )
    elif power == 0:
        return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError('Exactly one argument must be 0' )
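# Examples (names as reconstructed above): exactly one quantity must be zero,
# and the function solves for it via P = V * I.
#   electric_power(voltage=0, current=2, power=5)  -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)  -> result(name='power', value=4.0)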
if __name__ == "__main__":
import doctest
doctest.testmod()
| 160 | 1 |
def max_product_subarray( numbers ) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers , (list, tuple) ) or not all(
        isinstance(number , int ) for number in numbers ):
        raise ValueError('numbers must be an iterable of integers' )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1 , len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number , max_till_now * number )
        min_till_now = min(number , min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod , max_till_now )
    return max_prod
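# Quick demo of max_product_subarray (function name reconstructed above): the
# maximum-product contiguous subarray of [2, 3, -2, 4] is [2, 3] with product 6,
# and a zero resets the running products.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4] ) == 6
    assert max_product_subarray([-2, 0, -1] ) == 0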
| 21 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImgaImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyImgaImgPipeline
    params = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
    batch_params = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
    required_optional_params = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
        return inputs
    def test_kandinsky_img2img(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyImgaImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img_pipeline(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinsky/kandinsky_img2img_frog.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='np', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 21 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForMaskedLM(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
        self.assertIsNotNone(model )
@require_tf
class TFDeBERTaVaModelIntegrationTest( unittest.TestCase ):
    @unittest.skip(reason="""Model not available yet""" )
    def test_inference_masked_lm( self ):
        pass

    @slow
    def test_inference_no_head( self ):
        model = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 )
| 341 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {}
class LlamaConfig( PretrainedConfig ):
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("""type""" , None )
        rope_scaling_factor = self.rope_scaling.get("""factor""" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
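# Example: enabling RoPE scaling on the config above (a sketch):
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
# `type` must be "linear" or "dynamic", and `factor` a float strictly > 1.0.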
| 341 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class LiltConfig( PretrainedConfig ):
    model_type = "lilt"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1024 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 141 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class BitConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=32 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=32 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'''Padding strategy {global_padding} not supported''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 141 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
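# The _LazyModule pattern above defers importing torch and the WavLM modeling
# code until an attribute is first touched, so for example
#   from transformers import WavLMConfig
# stays cheap, and WavLMModel is only materialized on first access.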
| 365 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ) -> None:
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(f"""=> File names {file_names}""" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
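    # Example invocation (added; hypothetical paths, and the script name is an
    # assumption based on the module's purpose):
    # python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
    #     --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers --force_download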
| 230 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Write a basic Accelerate cluster config to `save_location`."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file", default=default_json_config_file, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), dest="save_location", )
    parser.add_argument(
        "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no", )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}" )
| 134 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1." )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
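# --- Illustrative usage (added; not part of the original class) ---
# A short, self-contained sketch of the API above: p(x) = 2x^2 + 3x + 1 and
# q(x) = x + 1, with coefficients ordered from the x^0 term upward.
if __name__ == "__main__":
    p = Polynomial(2, [1, 3, 2])
    q = Polynomial(1, [1, 1])
    print(p)               # 2x^2 + 3x + 1
    print(p + q)           # 2x^2 + 4x + 2
    print(p.evaluate(2))   # 1 + 3*2 + 2*4 = 15
    print(p.derivative())  # 4x + 3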
| 134 | 1 |
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
) | 364 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = F"{src_lang}-{tgt_lang}"
    readme = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md" )
    print(F"Generating {path}" )
    with open(path, "w", encoding="utf-8" ) as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
base, src_lang, tgt_lang = model_name.split("-")
model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 182 | 0 |
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """A simple classification head: one linear layer over hidden states."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
return logits | 160 |
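# --- Illustrative usage (added; not part of the original module) ---
# A minimal smoke test for the head above; the sizes are arbitrary.
# import torch
# head = ClassificationHead(class_size=5, embed_size=768)
# logits = head(torch.randn(4, 768))  # -> shape (4, 5)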
"""simple docstring"""
g = 9.80665  # standard gravity, m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Calculate the buoyant force on an object fully submerged in a fluid."""
    if fluid_density <= 0:
        raise ValueError('''Impossible fluid density''')
    if volume < 0:
        raise ValueError('''Impossible Object volume''')
    if gravity <= 0:
        raise ValueError('''Impossible Gravity''')
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 160 | 1 |
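# --- Worked example (added) ---
# Fresh water has a density of roughly 1000 kg/m^3, so a fully submerged
# 0.5 m^3 object experiences a buoyant force of about 1000 * 9.80665 * 0.5:
# print(archimedes_principle(fluid_density=1000, volume=0.5))  # ~4903.325 N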
'''simple docstring'''
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return F"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node) for node in nodes )

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head )

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""" )

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
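# --- Illustrative usage (added; not part of the original module) ---
# A small sketch of the doubly linked list API reconstructed above:
# ll = LinkedList()
# for value in (1, 2, 3):
#     ll.insert(value)
# ll.insert_at_position(2, 99)  # positions count from 1 -> "1 99 2 3"
# print(ll)                     # 1 99 2 3
# print(2 in ll)                # True
# ll.delete_value(99)
# print(list(ll))               # [1, 2, 3]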
| 352 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
        'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
        'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/speecht5_asr': 1_024,
    'microsoft/speecht5_tts': 1_024,
    'microsoft/speecht5_vc': 1_024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str )

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None) -> List[int]:
        if token_ids_a_pair is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a_pair + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_a_pair is None:
            return ([0] * len(token_ids_a )) + suffix_ones
        return ([0] * len(token_ids_a )) + ([0] * len(token_ids_a_pair )) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
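# --- Illustrative usage (added; not part of the original module) ---
# A hedged sketch of loading this tokenizer from one of the checkpoints in the
# pretrained map above; network access is assumed, so it is left as comments.
# from transformers import SpeechT5Tokenizer
# tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
# input_ids = tok("Hello world", return_tensors="pt").input_ids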
| 164 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """_float_tensor""",
        """decoder.output_projection.weight""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="""cpu""" )["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = """relu"""
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path) | 341 |
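# Example invocation (added; hypothetical paths, and the script name is an
# assumption based on the module's purpose):
# python convert_mbart_original_checkpoint_to_pytorch.py /path/to/model.pt ./mbart-dump \
#     --hf_config facebook/mbart-large-cc25 --finetuned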
'''simple docstring'''
ROMAN = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    vals = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod() | 341 | 1 |
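# --- Worked example (added) ---
# 2024 decomposes as M + M (2000), XX (20) and IV (4):
# print(roman_to_int("MMXXIV"))  # 2024
# print(int_to_roman(2024))      # MMXXIV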
def compute_ap(l) -> None:  # noqa: E741
    """Print every articulation point of the undirected graph `l` (adjacency list)."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
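# Added note: for the sample graph above, the articulation points printed
# should be vertices 2, 3 and 5 -- removing any of them disconnects the graph.
# The DFS runs in O(V + E) time.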
| 44 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
A__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
A__ = 12_8022
A__ = 12_8028
@require_sentencepiece
class MaMaaaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
def __lowerCamelCase ( self :int ):
super().setUp()
snake_case__ : Tuple = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
snake_case__ : Optional[Any] = dict(zip(__lowercase ,range(len(__lowercase ) ) ) )
snake_case__ : List[Any] = Path(self.tmpdirname )
save_json(__lowercase ,save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__lowercase ,save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
snake_case__ : str = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self :Optional[int] ,**__lowercase :Optional[int] ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname ,**__lowercase )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Tuple ):
return (
"This is a test",
"This is a test",
)
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Tuple = '''</s>'''
snake_case__ : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) ,__lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) ,__lowercase )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : Union[str, Any] = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''</s>''' )
self.assertEqual(vocab_keys[1] ,'''<unk>''' )
self.assertEqual(vocab_keys[-1] ,'''<s>''' )
self.assertEqual(len(__lowercase ) ,tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip('''Skip this test while all models are still to be uploaded.''' )
def __lowerCamelCase ( self :List[Any] ):
pass
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : List[Any] = self.get_tokenizer()
snake_case__ : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__lowercase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) ,[2, 3, 4, 5, 6] ,)
snake_case__ : str = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(__lowercase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
snake_case__ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
self.assertEqual(__lowercase ,'''This is a test''' )
@slow
def __lowerCamelCase ( self :Union[str, Any] ):
# fmt: off
snake_case__ : Tuple = {'''input_ids''': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase ,model_name='''facebook/m2m100_418M''' ,revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest( unittest.TestCase ):
    checkpoint_name = """facebook/m2m100_418M"""
    src_text = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    tgt_text = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass( cls :Union[str, Any] ):
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name ,src_lang='''en''' ,tgt_lang='''fr''' )
        cls.pad_token_id = 1
        return cls
def __lowerCamelCase ( self :Tuple ):
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) ,1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) ,1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) ,1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) ,1_2_8_0_6_3 )
def __lowerCamelCase ( self :Any ):
snake_case__ : Optional[int] = self.tokenizer.get_vocab()
self.assertEqual(len(__lowercase ) ,self.tokenizer.vocab_size )
self.assertEqual(vocab['''<unk>'''] ,3 )
self.assertIn(self.tokenizer.get_lang_token('''en''' ) ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[int] = '''en'''
snake_case__ : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,__lowercase )
def __lowerCamelCase ( self :List[Any] ):
self.assertIn(__lowercase ,self.tokenizer.all_special_ids )
# fmt: off
snake_case__ : int = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
snake_case__ : Tuple = self.tokenizer.decode(__lowercase ,skip_special_tokens=__lowercase )
snake_case__ : Optional[int] = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=__lowercase )
self.assertEqual(__lowercase ,__lowercase )
self.assertNotIn(self.tokenizer.eos_token ,__lowercase )
def __lowerCamelCase ( self :Any ):
snake_case__ : List[Any] = tempfile.mkdtemp()
snake_case__ : List[Any] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(__lowercase )
snake_case__ : Any = MaMaaaTokenizer.from_pretrained(__lowercase )
self.assertDictEqual(new_tok.lang_token_to_id ,__lowercase )
@require_torch
def __lowerCamelCase ( self :str ):
snake_case__ : Dict = '''en'''
snake_case__ : List[Any] = '''fr'''
snake_case__ : Union[str, Any] = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=__lowercase ,return_tensors='''pt''' )
snake_case__ : Optional[int] = shift_tokens_right(
batch['''labels'''] ,self.tokenizer.pad_token_id ,self.tokenizer.eos_token_id )
for k in batch:
snake_case__ : Optional[int] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[Any] = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
snake_case__ : Any = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
@require_torch
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Union[str, Any] = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''mr''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
snake_case__ : List[str] = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('''zh''' )] )
self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __lowerCamelCase ( self :Tuple ):
snake_case__ : str = self.tokenizer._build_translation_inputs('''A test''' ,return_tensors='''pt''' ,src_lang='''en''' ,tgt_lang='''ar''' )
self.assertEqual(
nested_simplify(__lowercase ) ,{
# en_XX, A, test, EOS
'''input_ids''': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 1_2_8_0_0_6,
} ,)
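# --- Illustrative usage (added; not part of the test file) ---
# A hedged sketch of the tokenizer/model pair these tests exercise; it needs
# network access and torch, so it is left as comments.
# from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
# batch = tokenizer("Hello world", return_tensors="pt")
# generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
# print(tokenizer.batch_decode(generated, skip_special_tokens=True))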
| 44 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling( TaskTemplate ):
    task: str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 10 |
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1 ) != 0 )


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 230 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """A stand-in so references below still resolve when vision extras are missing."""

        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase ):
    """simple docstring"""
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def UpperCamelCase__ ( self : Any , __a : Union[str, Any] , __a : Any , __a : Tuple ):
_a = ObjectDetectionPipeline(model=_a , image_processor=_a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def UpperCamelCase__ ( self : Union[str, Any] , __a : Optional[int] , __a : Dict ):
_a = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(_a ) , 0 )
for detected_object in outputs:
self.assertEqual(
_a , {
"score": ANY(_a ),
"label": ANY(_a ),
"box": {"xmin": ANY(_a ), "ymin": ANY(_a ), "xmax": ANY(_a ), "ymax": ANY(_a )},
} , )
import datasets
_a = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
_a = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
_a = object_detector(_a , threshold=0.0 )
self.assertEqual(len(_a ) , len(_a ) )
for outputs in batch_outputs:
self.assertGreater(len(_a ) , 0 )
for detected_object in outputs:
self.assertEqual(
_a , {
"score": ANY(_a ),
"label": ANY(_a ),
"box": {"xmin": ANY(_a ), "ymin": ANY(_a ), "xmax": ANY(_a ), "ymax": ANY(_a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def UpperCamelCase__ ( self : Optional[Any] ):
pass
@require_torch
def UpperCamelCase__ ( self : Dict ):
_a = """hf-internal-testing/tiny-detr-mobilenetsv3"""
_a = AutoModelForObjectDetection.from_pretrained(_a )
_a = AutoFeatureExtractor.from_pretrained(_a )
_a = ObjectDetectionPipeline(model=_a , feature_extractor=_a )
_a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
] , )
_a = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
],
[
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
{"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_59, "ymin": 1_20, "xmax": 4_80, "ymax": 3_59}},
],
] , )
@require_torch
@slow
def UpperCamelCase__ ( self : int ):
_a = """facebook/detr-resnet-50"""
_a = AutoModelForObjectDetection.from_pretrained(_a )
_a = AutoFeatureExtractor.from_pretrained(_a )
_a = ObjectDetectionPipeline(model=_a , feature_extractor=_a )
_a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
_a = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
] , )
@require_torch
@slow
def UpperCamelCase__ ( self : int ):
_a = """facebook/detr-resnet-50"""
_a = pipeline("object-detection" , model=_a )
_a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
_a = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
[
{"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 1_75, "ymax": 1_17}},
{"score": 0.9960, "label": "remote", "box": {"xmin": 3_33, "ymin": 72, "xmax": 3_68, "ymax": 1_87}},
{"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_39, "ymax": 4_73}},
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
],
] , )
@require_torch
@slow
def UpperCamelCase__ ( self : str ):
_a = 0.9985
_a = """facebook/detr-resnet-50"""
_a = pipeline("object-detection" , model=_a )
_a = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=_a )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 3_14, "ymax": 4_70}},
{"score": 0.9987, "label": "cat", "box": {"xmin": 3_45, "ymin": 23, "xmax": 6_40, "ymax": 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = """Narsil/layoutlmv3-finetuned-funsd"""
_a = 0.9993
_a = pipeline("object-detection" , model=_a , threshold=_a )
_a = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
{"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_94, "ymin": 2_54, "xmax": 3_43, "ymax": 2_64}},
] , )
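# --- Illustrative usage (added; not part of the test file) ---
# The pipeline under test can be driven directly; it needs torch, timm and
# network access, so it is left as comments.
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# for obj in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
#     print(obj["label"], obj["score"], obj["box"])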
| 362 |
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf" )
    for i in range(1, n + 1 ):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices) )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf" )
        for i in range(1, n + 1 ):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1, n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1 ):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = F'n must be greater than or equal to 0. Got n = {n}'
        raise ValueError(msg)
    if n > len(prices ):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            F'Got n = {n} but length of prices = {len(prices )}'
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
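# --- Worked example (added) ---
# With the classic CLRS price table, a rod of length 8 is worth 22
# (cut into lengths 2 and 6: 5 + 17):
# print(bottom_up_cut_rod(8, [1, 5, 8, 9, 10, 17, 17, 20]))  # 22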
| 346 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    '''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ) ->Tuple:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
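# Distilled from the integration test above: a minimal standalone forward pass.
# The token ids and boxes are the same dummy values the test uses; the image
# path assumes the repo's test fixtures are available locally.
if __name__ == "__main__":
    image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
    layout_model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
    img = prepare_img()
    pixels = image_processor(images=img, return_tensors="pt").pixel_values
    ids = torch.tensor([[1, 2]])  # dummy token ids
    boxes = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one dummy box per token
    out = layout_model(input_ids=ids, bbox=boxes, pixel_values=pixels)
    print(out.last_hidden_state.shape)  # torch.Size([1, 199, 768]): 2 text tokens + 196 patches + CLS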
| 10 | import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
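# The rule under test, run directly for illustration: a dataset counts as
# "small" only when its byte size is strictly below the configured cap.
# Setting the config attribute in place here is for demonstration only; the
# test above uses pytest's `monkeypatch` to keep the change isolated.
if __name__ == "__main__":
    datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # 500 MiB cap
    print(is_small_dataset(400 * 2**20))  # True: below the cap
    print(is_small_dataset(600 * 2**20))  # False: above the cap
    print(is_small_dataset(None))  # False: unknown size is never "small"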
| 182 | 0 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents).

    Documents in `corpus` are separated by newlines.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), or 1 + log10(n / (1 + df)) with smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Return the tf-idf weight, rounded to 3 decimal places."""
    return round(tf * idf, 3)
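# A short usage sketch for the helpers above; the toy corpus is made up.
if __name__ == "__main__":
    example_corpus = "the cat sat on the mat\nthe dog sat on the log\ncats and dogs"
    tf_value = term_frequency("sat", "the cat sat on the mat")  # 1
    df_value, n_docs = document_frequency("sat", example_corpus)  # (2, 3)
    idf_value = inverse_document_frequency(df_value, n_docs)  # round(log10(3 / 2), 3) == 0.176
    print(tf_idf(tf_value, idf_value))  # 0.176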
| 35 | import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
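# A minimal sketch composing the two sub-configs into an AlignConfig; the
# values are illustrative, not tuned settings.
if __name__ == "__main__":
    text_cfg = AlignTextConfig(vocab_size=30_522, hidden_size=768)
    vision_cfg = AlignVisionConfig(image_size=600)
    align_cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=640)
    print(align_cfg.to_dict()["model_type"])  # "align"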
| 35 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
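# How the pattern above works: `_LazyModule` replaces this module in
# `sys.modules` with an object whose attribute lookup imports the right
# submodule on first use. A simplified standalone illustration of the same
# idea, via PEP 562's module-level `__getattr__` (not part of transformers,
# shown commented out so it does not alter this module's behavior):
#
#     import importlib
#
#     _import_structure = {"json": ["dumps"], "math": ["sqrt"]}
#
#     def __getattr__(name):
#         for module_name, attrs in _import_structure.items():
#             if name in attrs:
#                 return getattr(importlib.import_module(module_name), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")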
| 113 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
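# With the entry points above, this file works as a torch.hub `hubconf.py`.
# Typical usage from the hub (requires network access; the repo name below is
# the one historically documented for torch.hub, stated here as an assumption):
if __name__ == "__main__":
    import torch

    hub_tokenizer = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
    hub_model = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")
    print(hub_tokenizer("Hello world")["input_ids"], type(hub_model).__name__)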
| 164 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def __a ( self :Union[str, Any] , **_lowercase :Any) -> Union[str, Any]:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase)
return config
def __a ( self :str , _lowercase :List[Any]=0 , **_lowercase :str) -> Union[str, Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Any) -> Optional[Any]:
pass
def __a ( self :str , _lowercase :int=0 , **_lowercase :Union[str, Any]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_prk(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step_plms(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :int , **_lowercase :str) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.prk_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :Union[str, Any]) -> int:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , '''set_timesteps'''):
scheduler.set_timesteps(_lowercase)
elif num_inference_steps is not None and not hasattr(_lowercase , '''set_timesteps'''):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ = dummy_past_residuals[:]
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_prk(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 0 , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = scheduler.step_plms(_lowercase , 1 , _lowercase , **_lowercase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def __a ( self :Any) -> Dict:
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :List[Any]) -> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(steps_offset=1)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]) , )
def __a ( self :Optional[int]) -> str:
for beta_start, beta_end in zip([0.0_001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase)
def __a ( self :Any) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase)
def __a ( self :List[Any]) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=_lowercase)
def __a ( self :Tuple) -> Dict:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100]):
self.check_over_forward(num_inference_steps=_lowercase)
def __a ( self :str) -> List[Any]:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
UpperCAmelCase_ = scheduler.step_prk(_lowercase , _lowercase , _lowercase).prev_sample
def __a ( self :List[str]) -> int:
with self.assertRaises(_lowercase):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def __a ( self :List[str]) -> Dict:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 198.1_318) < 1E-2
assert abs(result_mean.item() - 0.2_580) < 1E-3
def __a ( self :Any) -> Tuple:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 67.3_986) < 1E-2
assert abs(result_mean.item() - 0.0_878) < 1E-3
def __a ( self :int) -> Any:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 230.0_399) < 1E-2
assert abs(result_mean.item() - 0.2_995) < 1E-3
def __a ( self :Any) -> Dict:
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01)
UpperCAmelCase_ = torch.sum(torch.abs(_lowercase))
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_sum.item() - 186.9_482) < 1E-2
assert abs(result_mean.item() - 0.2_434) < 1E-3
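# A minimal denoising-loop sketch with the scheduler under test. The random
# tensors stand in for a real UNet and its noisy sample; that substitution is
# an assumption for illustration only.
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)  # stand-in for an initial noisy sample
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # a real pipeline would call a UNet here
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)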
| 344 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
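# The batched read inside `_generate_tables` boils down to these pyarrow
# calls (the file path below is a placeholder):
if __name__ == "__main__":
    parquet_file = pq.ParquetFile("data/train-00000.parquet")  # placeholder path
    for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
        batch_table = pa.Table.from_batches([record_batch])
        print(batch_idx, batch_table.num_rows)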
| 44 | """simple docstring"""
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of the undirected graph `l` (adjacency list)."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point only if it has more than one out-edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
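# For this graph, removing node 2 disconnects {0, 1}, removing 3 isolates 4,
# and removing 5 cuts off {6, 7, 8}, so compute_ap(data) above prints 2, 3 and 5.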
| 44 | 1 |
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """
    >>> _validate_point(None)
    Traceback (most recent call last):
        ...
    ValueError: Missing an input
    """
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    One-liner variant of manhattan_distance.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        """Propagate the input forward through both hidden layers."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        """Update the weights by gradient descent on the squared error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic activation: maps any real value into (0, 1)."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of its output value."""
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
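# Note: the truth table in example() is 3-bit odd parity (output 1 when an odd
# number of inputs is 1), so a converged network should return 1 for [1, 1, 1].
# With only 10 training iterations and random initial weights, the prediction
# can still be wrong on some runs.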
| 100 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # the expected encoding below keeps its original placeholder name (`snake_case_`)
        # fmt: off
snake_case_ = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e")
| 56 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Union[str, Any]=False ):
"""simple docstring"""
UpperCAmelCase__ = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
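
# A minimal usage sketch (hypothetical, not part of the metric itself). It mirrors the
# docstring example above but enables the Lin et al. (2004) smoothing, which keeps the
# score from collapsing to zero when some higher-order n-gram has no match:
#
#     bleu = datasets.load_metric("bleu")
#     results = bleu.compute(
#         predictions=[["hello", "there"]],
#         references=[[["hello", "there", "general", "kenobi"]]],
#         smooth=True,
#     )
#     print(results["bleu"], results["precisions"], results["brevity_penalty"])
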
# Lint as: python3
"""PyTorch tensor formatter for Arrow-backed datasets."""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization

    def _consolidate(self, column):
        import torch

        # stack equally-shaped, equally-typed tensors into one batch tensor
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
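
# A minimal sketch of how this formatter is reached in practice (assuming an in-memory
# Arrow-backed `Dataset`; extra keyword arguments are forwarded to `torch.tensor` via
# `torch_tensor_kwargs`):
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch", dtype=torch.float32)
#     print(ds[0]["x"])     # tensor([1., 2.])
#     print(ds["x"].shape)  # torch.Size([2, 2]) - equally-shaped rows are stacked by _consolidate
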
"""
Project Euler Problem 207: https://projecteuler.net/problem=207

Find the smallest value of m for which the proportion of perfect partitions
P(m) falls below the given fraction (1/12345 by default).
"""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """A partition count is perfect when sqrt(4*k + 1)/2 + 1/2 is an exact power of two."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
            return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
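
# Worked check (derived from the closed form above, not from the original file): the
# expression sqrt(4*m + 1)/2 + 1/2 is a power of two exactly when m = 2**j * (2**j - 1)
# for some integer j, so m = 2 (j=1) and m = 12 (j=2) are perfect while m = 10 is not:
#
#     >>> check_partition_perfect(12)
#     True
#     >>> check_partition_perfect(10)
#     False
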
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2

        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
"""Print a diamond (pyramid) pattern of stars."""


def floyd(n):
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
a = logging.get_logger(__name__)
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
super().__init__()
_A = nn.ModuleList(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Union[torch.Tensor, float, int] , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : List[torch.tensor] , _UpperCAmelCase : List[float] , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , ):
for i, (image, scale, controlnet) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase , self.nets ) ):
_A , _A = controlnet(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# merge samples
if i == 0:
_A , _A = down_samples, mid_sample
else:
_A = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(_UpperCAmelCase , _UpperCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Union[str, os.PathLike] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Callable = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[str] = None , ):
_A = 0
_A = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
_UpperCAmelCase , is_main_process=_UpperCAmelCase , save_function=_UpperCAmelCase , safe_serialization=_UpperCAmelCase , variant=_UpperCAmelCase , )
idx += 1
_A = model_path_to_save + F'''_{idx}'''
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , _UpperCAmelCase : Optional[Union[str, os.PathLike]] , **_UpperCAmelCase : Tuple ):
_A = 0
_A = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_A = pretrained_model_path
while os.path.isdir(_UpperCAmelCase ):
_A = ControlNetModel.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
controlnets.append(_UpperCAmelCase )
idx += 1
_A = pretrained_model_path + F'''_{idx}'''
logger.info(F'''{len(_UpperCAmelCase )} controlnets loaded from {pretrained_model_path}.''' )
if len(_UpperCAmelCase ) == 0:
raise ValueError(
F'''No ControlNets found under {os.path.dirname(_UpperCAmelCase )}. Expected at least {pretrained_model_path + "_0"}.''' )
return cls(_UpperCAmelCase )
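
# A minimal usage sketch (hypothetical checkpoint ids; `StableDiffusionControlNetPipeline`
# wraps a list of controlnets in this class automatically when you pass one):
#
#     from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#     controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", controlnet=[controlnet_canny, controlnet_pose]
#     )
#     # `pipe.controlnet` is now a MultiControlNetModel; per-net conditioning strengths can
#     # be passed at call time, e.g. controlnet_conditioning_scale=[1.0, 0.8].
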
"""simple docstring"""
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
_A = ''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _snake_case ( _snake_case : str ) -> dict[str, str]:
'''simple docstring'''
_A = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_A = remove_duplicates(key.upper() )
_A = len(_snake_case )
# First fill cipher with key characters
_A = {alphabet[i]: char for i, char in enumerate(_snake_case )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_snake_case ) , 26 ):
_A = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_A = alphabet[i - offset]
_A = char
return cipher_alphabet
def _snake_case ( _snake_case : str , _snake_case : dict[str, str] ) -> str:
'''simple docstring'''
return "".join(cipher_map.get(_snake_case , _snake_case ) for ch in message.upper() )
def _snake_case ( _snake_case : str , _snake_case : dict[str, str] ) -> str:
'''simple docstring'''
_A = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_snake_case , _snake_case ) for ch in message.upper() )
def _snake_case ( ) -> None:
'''simple docstring'''
_A = input('Enter message to encode or decode: ' ).strip()
_A = input('Enter keyword: ' ).strip()
_A = input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
_A = {'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
_A = create_cipher_map(_snake_case )
print(func(_snake_case , _snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
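
# Round-trip example (computed with the functions above):
#
#     >>> cipher_map = create_cipher_map("Goodbye!!")
#     >>> encipher("Hello World!!", cipher_map)
#     'CYJJM VMQJB!!'
#     >>> decipher("CYJJM VMQJB!!", cipher_map)
#     'HELLO WORLD!!'
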
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
""" Testing suite for the PyTorch DeiT model. """


import inspect
import unittest
import warnings

from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        DeiTForImageClassification,
        DeiTForImageClassificationWithTeacher,
        DeiTForMaskedImageModeling,
        DeiTModel,
    )
    from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ (self ) -> Tuple:
UpperCamelCase = 1
UpperCamelCase = 3
UpperCamelCase = (32, 32)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def snake_case_ (self ) -> Any:
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def snake_case_ (self ) -> Tuple:
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def snake_case_ (self ) -> Dict:
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__a )
@property
def snake_case_ (self ) -> List[Any]:
def extract(*__a , **__a ):
class _lowerCamelCase :
def __init__(self ) -> List[Any]:
UpperCamelCase = torch.ones([0] )
def snake_case_ (self , __a ) -> Optional[Any]:
self.pixel_values.to(__a )
return self
return Out()
return extract
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
UpperCamelCase = StableDiffusionPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = "A painting of a squirrel eating a burger"
UpperCamelCase = torch.Generator(device=__a ).manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
UpperCamelCase = output.images
UpperCamelCase = torch.Generator(device=__a ).manual_seed(0 )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=__a )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
UpperCamelCase = StableDiffusionPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = "A painting of a squirrel eating a burger"
UpperCamelCase = torch.Generator(device=__a ).manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
UpperCamelCase = output.images
UpperCamelCase = torch.Generator(device=__a ).manual_seed(0 )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ (self ) -> Any:
UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=__a )
assert isinstance(__a , __a )
assert isinstance(pipe.scheduler , __a )
assert pipe.safety_checker is None
UpperCamelCase = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(__a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=__a )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
UpperCamelCase = unet.half()
UpperCamelCase = vae.half()
UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase = StableDiffusionPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = "A painting of a squirrel eating a burger"
UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> int:
UpperCamelCase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__a )
UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
UpperCamelCase = 40_03_66_03_46
UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__a )
UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = "padme amidala taking a bath artwork, safe for work, no nudity"
UpperCamelCase = 27_34_97_17_55
UpperCamelCase = 7
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
UpperCamelCase = 10_44_35_52_34
UpperCamelCase = 12
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class _lowerCamelCase :
def __init__(self ) -> None:
UpperCamelCase = [2, 1, 2, -1]
UpperCamelCase = [1, 2, 3, 4]
def snake_case_ (self ) -> list[float]:
UpperCamelCase = len(self.first_signal )
UpperCamelCase = len(self.second_signal )
UpperCamelCase = max(__a , __a )
# create a zero matrix of max_length x max_length
UpperCamelCase = [[0] * max_length for i in range(__a )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__a ):
UpperCamelCase = deque(self.second_signal )
rotated_signal.rotate(__a )
for j, item in enumerate(__a ):
matrix[i][j] += item
# multiply the matrix with the first signal
UpperCamelCase = np.matmul(np.transpose(__a ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__a , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
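
# Sanity check (computed independently, not from the original file): circular convolution
# equals the inverse DFT of the product of the DFTs, so the matrix method above can be
# cross-checked with numpy's FFT:
#
#     >>> a, b = [2, 1, 2, -1], [1, 2, 3, 4]
#     >>> np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b))).round(2).tolist()
#     [10.0, 10.0, 6.0, 14.0]
#
# which matches CircularConvolution().circular_convolution() == [10.0, 10.0, 6.0, 14.0].
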
"""Project Euler Problem 8: greatest product of thirteen adjacent digits."""
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Scan every window of thirteen adjacent digits and return the greatest product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
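
# Sanity note: with the default 1000-digit series above, the greatest product of thirteen
# adjacent digits is 23514624000 (the published Project Euler 8 answer), i.e.
# solution() == 23514624000; any window containing a '0' simply contributes a product of 0.
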
"""simple docstring"""
from __future__ import annotations
__magic_name__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__magic_name__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ )
for i in range(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = -1
for j in range(i + 1 , UpperCamelCase_ ):
if arr[i] < arr[j]:
__SCREAMING_SNAKE_CASE = arr[j]
break
result.append(UpperCamelCase_ )
return result
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
for i, outer in enumerate(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = -1
for inner in arr[i + 1 :]:
if outer < inner:
__SCREAMING_SNAKE_CASE = inner
break
result.append(UpperCamelCase_ )
return result
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = len(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = [-1] * arr_size
for index in reversed(range(UpperCamelCase_ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
__SCREAMING_SNAKE_CASE = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__magic_name__ = (
"from __main__ import arr, next_greatest_element_slow, "
"next_greatest_element_fast, next_greatest_element"
)
print(
"next_greatest_element_slow():",
timeit("next_greatest_element_slow(arr)", setup=setup),
)
print(
"next_greatest_element_fast():",
timeit("next_greatest_element_fast(arr)", setup=setup),
)
print(
" next_greatest_element():",
timeit("next_greatest_element(arr)", setup=setup),
)
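
# Quick illustration (computed by hand): each element maps to the first larger element to
# its right, or -1 when none exists:
#
#     >>> next_greatest_element([2, 7, 3, 5, 4, 6, 8])
#     [7, 8, 5, 6, 6, 8, -1]
#
# The stack-based version is O(n): every element is pushed once and popped at most once.
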
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    """Hash table whose buckets are deques, so colliding values chain within a slot."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
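
# A small usage sketch (assumes the sibling `HashTable` exposes `insert_data` and a
# modulo-based hash function, as in this repository; the exact bucket layout depends on
# the parent class):
#
#     ht = HashTableWithLinkedList(size_table=3, charge_factor=2)
#     for value in (10, 20, 30):
#         ht.insert_data(value)
#     print(ht.values)  # e.g. [deque([30]), deque([10]), deque([20])]
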
from collections import defaultdict
def dfs(start: int) -> int:
    """Depth-first search from ``start``; returns the size of its subtree."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Mark every vertex whose subtree has an even number of nodes."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
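# Why ``len(cuts) - 1``: dfs() records every vertex whose subtree has an even
# number of nodes, which always includes the root (the whole even-sized tree);
# the root's entry does not correspond to a removable edge, hence the minus one.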
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serialize this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
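# Usage sketch (hypothetical checkpoint name):
#
#     args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8],
#                               sequence_lengths=[32])
#     print(args.to_json_string())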
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if ``positive_integer`` admits a perfect partition."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first candidate for which the proportion of perfect
    partitions drops below ``max_proportion``."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
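# Note: (k**2 - 1) / 4 is an integer exactly when k is odd, and
# check_partition_perfect() asks whether sqrt(4*n + 1) / 2 + 1 / 2 is an
# exact power of two, i.e. whether 4*n + 1 == (2**m - 1) ** 2 for some m.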
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorate a method (typically ``encode``/``decode``) so that a registered
    accelerate offload hook runs before it, as it would for ``forward``.
    Requires accelerate >= 0.17.0; otherwise the method is returned unchanged.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
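# Usage sketch (hypothetical class): decorate any method that should trigger
# the accelerate offload hook before running, the way ``forward`` does.
#
#     class MyAutoencoder(SomeModelMixin):
#         @apply_forward_hook
#         def encode(self, x):
#             ...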
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
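# With this lazy layout, e.g. ``SwiftFormerModel`` is only materialized (and
# torch imported) on first attribute access instead of at package import time.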
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_a : List[Any] = {'input_ids': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=_a,  # the encoding literal bound above
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))
    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_4))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np"
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
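# Concrete feature-extractor tests use this mixin by setting the two class
# attributes, e.g. (hypothetical names):
#
#     class MyFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
#         feature_extraction_class = MyFeatureExtractor
#         feat_extract_tester = MyFeatureExtractionTester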
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of ``max_length`` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions"""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
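# Example invocation (hypothetical dataset/output paths):
#
#     python run_audio_classification.py \
#         --model_name_or_path facebook/wav2vec2-base \
#         --dataset_name superb --dataset_config_name ks \
#         --output_dir ./wav2vec2-base-ks --do_train --do_eval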
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def lowerCamelCase__ ( __snake_case ) -> str:
"""simple docstring"""
if not head:
return True
# split the list to two parts
_UpperCamelCase , _UpperCamelCase = head.next, head
while fast and fast.next:
_UpperCamelCase = fast.next.next
_UpperCamelCase = slow.next
_UpperCamelCase = slow.next
_UpperCamelCase = None # Don't forget here! But forget still works!
# reverse the second part
_UpperCamelCase = None
while second:
_UpperCamelCase = second.next
_UpperCamelCase = node
_UpperCamelCase = second
_UpperCamelCase = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
_UpperCamelCase = node.next
_UpperCamelCase = head.next
return True
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
_UpperCamelCase = _UpperCamelCase = _UpperCamelCase = head
while fast and fast.next:
_UpperCamelCase , _UpperCamelCase = fast.next.next, slow.next
# 2. Push the second half into the stack
_UpperCamelCase = [slow.val]
while slow.next:
_UpperCamelCase = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
_UpperCamelCase = cur.next
return True
def lowerCamelCase__ ( __snake_case ) -> Dict:
"""simple docstring"""
if not head or not head.next:
return True
_UpperCamelCase = {}
_UpperCamelCase = 0
while head:
if head.val in d:
d[head.val].append(__a )
else:
_UpperCamelCase = [pos]
_UpperCamelCase = head.next
pos += 1
_UpperCamelCase = pos - 1
_UpperCamelCase = 0
for v in d.values():
if len(__a ) % 2 != 0:
middle += 1
else:
_UpperCamelCase = 0
for i in range(0, len(__a ) ):
if v[i] + v[len(__a ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
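# The three checks assume a singly linked node with ``val`` and ``next``
# attributes. A minimal sketch for manual testing (not part of the module):
#
#     class ListNode:
#         def __init__(self, val):
#             self.val, self.next = val, None
#
#     a, b, c = ListNode(1), ListNode(2), ListNode(1)
#     a.next, b.next = b, c
#     print(is_palindrome_stack(a))  # True: 1 -> 2 -> 1 reads the same reversed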
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase__ = {"""input_ids""": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase__, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670"
        )
| 244 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        """Return a list of valid successor nodes reachable from `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from `node` back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # each search aims at the frontier node of the opposite direction
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
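# Example (illustrative): both classes return a ``Path``, i.e. a list of (y, x)
# tuples from start to goal, or the start position alone when no route exists:
#
#     path = BidirectionalBreadthFirstSearch((0, 0), (6, 6)).search()
#     # e.g. [(0, 0), (0, 1), ..., (6, 6)] on the module-level grid above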
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 357 |
"""simple docstring"""
def UpperCAmelCase ( a_ = 10 ):
'''simple docstring'''
if not isinstance(a_, a_ ) or n < 0:
raise ValueError('Invalid input' )
lowerCamelCase : Union[str, Any] = 10**n
lowerCamelCase : int = 2_8433 * (pow(2, 783_0457, a_ )) + 1
return str(number % modulus )
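# Example (illustrative): with n=1 the answer is the last digit. 2**7830457 ends
# in 2 (powers of two cycle 2, 4, 8, 6 and 7830457 % 4 == 1), so
# 28433 * 2 + 1 = 56867 ends in 7:
#
#     assert solution(1) == "7"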
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
| 205 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compiled wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
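# Example (illustrative): unwrapping a DataParallel container returns the
# original module, so weights can be saved without the wrapper prefix.
#
#     net = torch.nn.Linear(4, 4)
#     wrapped = torch.nn.DataParallel(net)
#     assert extract_model_from_parallel(wrapped) is net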
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it before continuing."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save `obj` to `f`, writing only once per machine (and via `xm.save` on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """A context manager that adds each keyword argument to `os.environ` (upper-cased) and removes it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
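# Example (illustrative): keys are upper-cased on entry and removed on exit.
#
#     with patch_environment(master_port="29501"):
#         assert os.environ["MASTER_PORT"] == "29501"
#     assert "MASTER_PORT" not in os.environ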
def get_pretty_name(obj):
    """Return a human-readable name for `obj`, falling back to its class when needed."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, returning `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
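# Example (illustrative): nested keys are merged recursively, flat keys overwrite.
#
#     merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
#     # -> {"a": {"y": 2, "x": 1}, "b": 3}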
def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on `localhost` (defaults to 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
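# Example (illustrative): probe the default rendezvous port before launching,
# to fail fast with a clearer error.
#
#     if is_port_in_use(29500):
#         raise RuntimeError("Port 29500 is taken; pass a different main process port.")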
| 84 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 84 | 1 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode an example into a format storable in an Arrow struct of (bytes, path)."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode an example (dict of path and/or bytes) into a PIL image."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten it into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type (struct of bytes and path)."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array as bytes, keeping only file basenames as paths."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def __lowercase ( snake_case_ : "PIL.Image.Image" ) ->bytes:
'''simple docstring'''
__A : Any = BytesIO()
if image.format in list_image_compression_formats():
__A : List[str] = image.format
else:
__A : Optional[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(snake_case_ ,format=snake_case_ )
return buffer.getvalue()
def __lowercase ( snake_case_ : "PIL.Image.Image" ) ->dict:
'''simple docstring'''
if hasattr(snake_case_ ,'''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(snake_case_ )}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def __lowercase ( snake_case_ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) ->List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
__A , __A : Dict = first_non_null_value(snake_case_ )
if isinstance(snake_case_ ,snake_case_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(snake_case_ ,np.ndarray ):
__A : List[str] = no_op_if_value_is_null(snake_case_ )
return [obj_to_image_dict_func(snake_case_ ) for obj in objs]
elif isinstance(snake_case_ ,PIL.Image.Image ):
__A : List[Any] = no_op_if_value_is_null(snake_case_ )
return [obj_to_image_dict_func(snake_case_ ) for obj in objs]
else:
return objs
else:
return objs
| 291 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
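# Example (illustrative): the default configuration downsamples audio by the
# product of the conv strides (5 * 2**6 = 320 input samples per logit frame):
#
#     config = SEWDConfig()
#     assert config.inputs_to_logits_ratio == 320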
| 291 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 87 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
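# Shape note (illustrative): given protein["aatype"] of shape [*, num_res], the
# function adds "residx_atom14_to_atom37" / "atom14_atom_exists" of shape
# [*, num_res, 14] and "residx_atom37_to_atom14" / "atom37_atom_exists" of shape
# [*, num_res, 37].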
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
| 87 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import base64

# NOTE: the exact base-N variant of this script (b16/b32/b64/b85 all fit the
# mangled identifiers in the original) could not be recovered; base64 is assumed.


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    """Decode Base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
| 177 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
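    # Example (illustrative) invocation; the script filename is assumed:
    #
    #     python convert_openai_original_tf_checkpoint_to_pytorch.py \
    #         --openai_checkpoint_folder_path ./openai-gpt-ckpt \
    #         --pytorch_dump_folder_path ./openai-gpt-pytorch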
| 158 |
"""Find the minimum number of perfect squares that sum to a given natural number."""
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` via dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
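# Example (illustrative): 12 = 4 + 4 + 4, so three squares suffice:
#
#     assert minimum_squares_to_represent_a_number(12) == 3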
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Return a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that BPE merge rules cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
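# Example (illustrative): every byte maps to a printable unicode character; the
# space byte is remapped outside the ASCII range.
#
#     byte_encoder = bytes_to_unicode()
#     assert byte_encoder[ord(" ")] == "Ġ"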
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of variable-length string symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
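# Example (illustrative): adjacent-symbol pairs drive the BPE merge loop below.
#
#     get_pairs(("h", "e", "l", "l", "o"))
#     # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}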
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs with BART special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BART does not use token type ids; return a list of zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 263 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Converts the artist, genre and lyrics tokens to their indices using the vocabularies."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Converts a string into a sequence of tokens (lyrics are tokenized character by character)."""
        return list(lyrics)
    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Converts three strings into lists of tokens, applying version-specific normalization first."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        '''simple docstring'''
        text = unicodedata.normalize('NFD', text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text):
        '''simple docstring'''
        accepted = (
            [chr(i) for i in range(ord('a'), ord('z') + 1)]
            + [chr(i) for i in range(ord('A'), ord('Z') + 1)]
            + [chr(i) for i in range(ord('0'), ord('9') + 1)]
            + ['.']
        )
        accepted = frozenset(accepted)
        pattern = re.compile(R'_+')
        text = ''.join([c if c in accepted else '_' for c in text.lower()])
        text = pattern.sub('_', text).strip('_')
        return text
    def convert_lyric_tokens_to_string(self, lyrics):
        '''simple docstring'''
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        '''simple docstring'''
        # Convert to TensorType if a plain string was passed
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                'Unable to create tensor, you should probably activate truncation and/or padding '
                'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.')

        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        '''simple docstring'''
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks})
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'])
        with open(artists_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'])
        with open(genres_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'])
        with open(lyrics_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        '''simple docstring'''
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
return artist, genres, lyrics
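
# Illustrative usage sketch -- the vocab file paths and the inputs below are
# placeholders, assuming locally available artist/genre/lyrics vocab files:
#
#   tokenizer = JukeboxTokenizer(artists_file, genres_file, lyrics_file)
#   encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
#   encoding["input_ids"]  # one tensor of artist + genre + lyric ids per prior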
| 263 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
A = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs) | 160 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        '''simple docstring'''
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        '''simple docstring'''
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        '''simple docstring'''
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        '''simple docstring'''
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table) -> Mapping:
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table) -> "torch.Tensor":
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table) -> Mapping:
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
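

# Minimal self-contained check of the dtype-defaulting rule implemented in
# _tensorize above: integer numpy input defaults to int64 and floating input
# to float32, unless torch_tensor_kwargs overrides the dtype.
if __name__ == "__main__":
    import numpy as _np
    import torch as _torch

    _arr = _np.array([1, 2, 3], dtype=_np.int32)
    _default = {"dtype": _torch.int64} if _np.issubdtype(_arr.dtype, _np.integer) else {}
    print(_torch.tensor(_arr, **_default).dtype)  # torch.int64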
| 205 | 0 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError('''The length of profit and weight must be same.''')
    if max_weight <= 0:
        raise ValueError('''max_weight must be greater than zero.''')
    if any(p < 0 for p in profit):
        raise ValueError('''Profit can not be negative.''')
    if any(w < 0 for w in weight):
        raise ValueError('''Weight can not be negative.''')

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this ratio as consumed

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
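

# Small worked example (illustrative): with profits [1, 2, 3], weights
# [3, 4, 5] and max_weight 15, every item fits, so the greedy pass collects
# the full profit of 6.
assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6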
if __name__ == "__main__":
print(
"Input profits, weights, and then max_weight (all positive ints) separated by "
"spaces."
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 99 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def __magic_name__ ( self : Optional[Any] ):
return 3_2
@property
def __magic_name__ ( self : Optional[int] ):
return 3_2
@property
def __magic_name__ ( self : Union[str, Any] ):
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Any ):
return 8
@property
def __magic_name__ ( self : List[str] ):
torch.manual_seed(0 )
UpperCAmelCase : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=6_4, projection_dim=self.text_embedder_hidden_size, intermediate_size=3_7, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
UpperCAmelCase : Any = CLIPVisionModel(__A )
return model
@property
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[int] = CLIPImageProcessor(
crop_size=2_2_4, do_center_crop=__A, do_normalize=__A, do_resize=__A, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=2_2_4, )
return image_processor
@property
def __magic_name__ ( self : Dict ):
torch.manual_seed(0 )
UpperCAmelCase : Any = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
UpperCAmelCase : List[Any] = PriorTransformer(**__A )
return model
@property
def __magic_name__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCAmelCase : List[Any] = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase : List[str] = ShapERenderer(**__A )
return model
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : str = self.dummy_prior
UpperCAmelCase : List[str] = self.dummy_image_encoder
UpperCAmelCase : List[Any] = self.dummy_image_processor
UpperCAmelCase : Dict = self.dummy_renderer
UpperCAmelCase : int = HeunDiscreteScheduler(
beta_schedule='''exp''', num_train_timesteps=1_0_2_4, prediction_type='''sample''', use_karras_sigmas=__A, clip_sample=__A, clip_sample_range=1.0, )
UpperCAmelCase : List[str] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : List[Any], __A : Dict, __A : List[Any]=0 ):
UpperCAmelCase : int = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith('''mps''' ):
UpperCAmelCase : List[str] = torch.manual_seed(__A )
else:
UpperCAmelCase : Optional[Any] = torch.Generator(device=__A ).manual_seed(__A )
UpperCAmelCase : Dict = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : str ):
UpperCAmelCase : Dict = '''cpu'''
UpperCAmelCase : Any = self.get_dummy_components()
UpperCAmelCase : Tuple = self.pipeline_class(**__A )
UpperCAmelCase : Dict = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__A ) )
UpperCAmelCase : Any = output.images[0]
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
UpperCAmelCase : Optional[int] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Dict ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = torch_device == '''cpu'''
UpperCAmelCase : List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=__A, relax_max_difference=__A, )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Dict = self.get_dummy_components()
UpperCAmelCase : Union[str, Any] = self.pipeline_class(**__A )
UpperCAmelCase : List[str] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Any = 1
UpperCAmelCase : Any = 2
UpperCAmelCase : Optional[int] = self.get_dummy_inputs(__A )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase : Any = batch_size * [inputs[key]]
UpperCAmelCase : int = pipe(**__A, num_images_per_prompt=__A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
def __magic_name__ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
UpperCAmelCase : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
        pipe = ShapEImg2ImgPipeline.from_pretrained('''openai/shap-e-img2img''')
UpperCAmelCase : int = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Dict = torch.Generator(device=__A ).manual_seed(0 )
UpperCAmelCase : List[str] = pipe(
__A, generator=__A, guidance_scale=3.0, num_inference_steps=6_4, frame_size=6_4, output_type='''np''', ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__A, __A )
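

# Illustrative command for running just this module's fast tests -- the test
# file path is an assumption about where this module lives in the repository:
#   pytest tests/pipelines/shap_e/test_pipeline_shap_e_img2img.py -k "not slow"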
| 99 | 1 |
"""simple docstring"""
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
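
    # Illustrative run: 0 marks an open cell, 1 a blocked one.
    demo_maze = [
        [0, 1, 0, 1],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    solve_maze(demo_maze)  # prints the 0/1 path matrix from (0, 0) to (3, 3)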
| 291 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the attention parameters of a layer, reshaped into matrices. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    if split_mlp_wi:
        wi_0 = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]

    wo = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    old = traverse_util.flatten_dict(variables["""target"""])
    old = {"""/""".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["""token_embedder/embedding"""]
# Encoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_attention_layer_norm""" )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """encoder""" , """attention""" )
lowerCamelCase = layer_norm
lowerCamelCase = k.T
lowerCamelCase = o.T
lowerCamelCase = q.T
lowerCamelCase = v.T
# Block i, layer 1 (MLP).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """encoder""" , """pre_mlp_layer_norm""" )
lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """encoder""" , snake_case__ )
lowerCamelCase = layer_norm
if split_mlp_wi:
lowerCamelCase = wi[0].T
lowerCamelCase = wi[1].T
else:
lowerCamelCase = wi.T
lowerCamelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCamelCase = tax_relpos_bias_lookup(
snake_case__ , snake_case__ , """encoder""" ).T
lowerCamelCase = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
lowerCamelCase = tax_relpos_bias_lookup(
snake_case__ , 0 , """encoder""" ).T
lowerCamelCase = tax_relpos_bias_lookup(
snake_case__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(snake_case__ ):
# Block i, layer 0 (Self Attention).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """self_attention""" )
lowerCamelCase = layer_norm
lowerCamelCase = k.T
lowerCamelCase = o.T
lowerCamelCase = q.T
lowerCamelCase = v.T
# Block i, layer 1 (Cross Attention).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = tax_attention_lookup(snake_case__ , snake_case__ , """decoder""" , """encoder_decoder_attention""" )
lowerCamelCase = layer_norm
lowerCamelCase = k.T
lowerCamelCase = o.T
lowerCamelCase = q.T
lowerCamelCase = v.T
# Block i, layer 2 (MLP).
lowerCamelCase = tax_layer_norm_lookup(snake_case__ , snake_case__ , """decoder""" , """pre_mlp_layer_norm""" )
lowerCamelCase , lowerCamelCase = tax_mlp_lookup(snake_case__ , snake_case__ , """decoder""" , snake_case__ )
lowerCamelCase = layer_norm
if split_mlp_wi:
lowerCamelCase = wi[0].T
lowerCamelCase = wi[1].T
else:
lowerCamelCase = wi.T
lowerCamelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowerCamelCase = tax_relpos_bias_lookup(snake_case__ , snake_case__ , """decoder""" ).T
lowerCamelCase = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCamelCase = old["""decoder/logits_dense/kernel"""].T
return new
def make_state_dict(converted_params, is_encoder_only):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["""shared.weight"""]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""")
            state_dict["lm_head.weight"] = state_dict["""shared.weight"""]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False):
    config = MT5Config.from_json_file(config_file)
    print(F'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("""Done""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
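
    # Illustrative invocation -- the script name and every path are placeholders:
    #   python convert_umt5_checkpoint.py \
    #       --t5x_checkpoint_path /path/to/t5x_checkpoint \
    #       --config_file /path/to/config.json \
    #       --pytorch_dump_path /path/to/output \
    #       --scalable_attention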
| 291 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A_ : List[str] = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=['stage2', 'stage3', 'stage4'], )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )

    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, src, dest):
    val = dct.pop(src)
    dct[dest] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''')
            in_proj_bias = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''model.decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[F'''model.decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[F'''model.decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'''model.decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'''model.decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[F'''model.decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
# load original state dict
if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
else:
raise ValueError(F'''Model name {model_name} not supported''' )
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
# original state dict
for name, param in state_dict.items():
print(UpperCAmelCase__ , param.shape )
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format='coco_detection')
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(UpperCAmelCase__ ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(UpperCAmelCase__ ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F'''jozhang97/{model_name}''' )
processor.push_to_hub(F'''jozhang97/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
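
    # Illustrative invocation -- the script name and output path are placeholders:
    #   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
    #       --pytorch_dump_folder_path ./deta-swin-large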
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 292 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE') for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''')
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME', None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type='TPU')
        print(F'''Launching a training on {num_processes} TPU cores.''')
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method='fork')
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
        function(*args)
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr='127.0.01', master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type='MULTI_GPU')

                print(F'''Launching training on {num_processes} GPUs.''')
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method='fork')
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
            function(*args)
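

# Typical notebook usage sketch -- `training_loop` stands for any user-defined
# function taking the given args:
#   notebook_launcher(training_loop, args=(model, train_dataloader), num_processes=2)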
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr='127.0.01', master_port='29500', accelerate_mixed_precision='no', accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu='yes', ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method='fork') | 292 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
def _lowerCamelCase ( self ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
"""simple docstring"""
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 250 | """simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 1_1
    last_digit = int('''1''' + '''0''' * digit_len)
    for num in range(den, last_digit):
        while den <= 9_9:
            if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(F"""{num}/{den}""")
            den += 1
        num += 1
        den = 1_0
    return solutions


def solution(max_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
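

# Worked check: the four two-digit "curious" fractions are 16/64, 19/95,
# 26/65 and 49/98; their product is 1/100, so the denominator in lowest
# terms is 100.
assert solution() == 100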
if __name__ == "__main__":
print(solution())
| 177 | 0 |
'''simple docstring'''
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, usually 0.04 or 0.06
        window_size : size (in pixels) of the neighbourhood summed around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)
    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 356 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models: a CNN/DailyMail dump."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)
    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Splits a CNN/DailyMail story file into article lines and "@highlight" summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate if longer, pad if shorter."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encodes the story and summary lines and flattens them into single token id lists."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    """Segment ids alternate 0/1 per sentence, switching at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
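# Illustrative (hypothetical token ids): with separator id 101 marking each
# sentence start, compute_token_type_ids([[101, 7, 8, 101, 9, 10]], 101)
# alternates the segment id per sentence and returns tensor([[0, 0, 0, 1, 1, 1]]).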
| 174 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Checks whether `number` is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
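# Example: 25 = 5 * 5 is rejected on the first loop iteration (i = 5), while 29
# survives every divisibility check and is correctly reported as prime.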
def solution(nth: int = 10001) -> int:
    """Returns the nth prime number (the first loop handles 2, the second only tests odd numbers)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f"{solution() = }")
| 263 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Computes Gamma(num) via the improper integral of x**(num - 1) * exp(-x) from 0 to inf."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
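# Sanity check: Gamma(n) == (n - 1)! for positive integers, so round(gamma(5))
# should be 24.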
if __name__ == "__main__":
from doctest import testmod
testmod()
| 263 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
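# Lazy-import pattern used across the library: submodules are registered in
# `_import_structure` and only actually imported when first accessed, which
# keeps the top-level package import fast.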
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    """Creates an optimizer with a learning rate schedule: optional warmup followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
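# Example (hypothetical hyperparameters): build an AdamWeightDecay optimizer with
# 500 warmup steps followed by a decay over the remaining training steps.
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=3e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
#   )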
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied to the variables after the gradient step."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Sums gradients across several steps so the optimizer can be applied less often."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
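# Illustrative accumulation loop (variable names are hypothetical):
#   accumulator = GradientAccumulator()
#   for step, batch in enumerate(batches):
#       grads = tape.gradient(loss, model.trainable_variables)
#       accumulator(grads)
#       if (step + 1) % accum_steps == 0:
#           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#           accumulator.reset()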
| 39 | 1 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
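# Note: a fully normalized 2D Gaussian would use 1 / (2 * pi * sigma**2); the
# 1 / (2 * pi * sigma) prefactor above is kept from the source, so the kernel
# does not sum exactly to 1 and output brightness can shift slightly.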
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 99 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 99 | 1 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that warns the caller that the wrapped function is experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
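# Usage (illustrative):
#
#   @experimental
#   def new_api():
#       ...
#
# Calling new_api() emits a UserWarning before delegating to the wrapped function.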
| 104 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Counts the set bits by repeatedly clearing the lowest one."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
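# `number & (number - 1)` clears the lowest set bit, so the loop above runs once
# per set bit instead of once per bit position: 0b1010 -> 0b1000 -> 0b0000
# counts two set bits in two iterations.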
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Counts the set bits by testing the last bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """Benchmark code comparing the two functions."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 104 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 292 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 292 | 1 |
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """Holds a DeepSpeed configuration dictionary and can be quickly queried for things like the zero stage."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        """Returns the value for the given dotted key, or `default` if unset."""
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """Deletes a sub-section of the config if it is found."""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
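# Dotted keys address nested config sections: get_value("zero_optimization.stage")
# walks config["zero_optimization"]["stage"] and falls back to the given default
# when any segment of the path is missing.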
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs | 8 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100) -> float:
    """Approximates the area under the curve by splitting it into trapezoids."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
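# Quick sanity check: trapezoidal_area(lambda x: x * x, 0, 1, 1000) is ~0.33333,
# matching the exact integral of x^2 over [0, 1], which is 1/3.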
if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10 | 8 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 147 |
'''simple docstring'''
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Implements the sine function via its Maclaurin (Taylor) series expansion."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
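# Sanity check: sin(90) should be ~1.0 and sin(270) ~-1.0, since the truncated
# series converges quickly once the angle is reduced to the [-360, 360] range.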
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 174 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__SCREAMING_SNAKE_CASE = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 356 | """simple docstring"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encodes to Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decodes from Baconian cipher."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
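# Each letter maps to a five-symbol A/B code, so decode() consumes five symbols
# at a time: encode("ab") == "AAAAAAAAAB" and decode("AAAAA AAAAB") == "a b".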
if __name__ == "__main__":
from doctest import testmod
testmod()
| 321 | 0 |
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in the 1000-digit number n that have the greatest product."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
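# Cross-check sketch (added): math.prod over each 13-digit window should agree
# with the reduce-based implementation above.
if __name__ == "__main__":
    from math import prod

    assert solution() == max(prod(int(c) for c in N[i : i + 13]) for i in range(len(N) - 12))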
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __A ( __lowerCAmelCase , __lowerCAmelCase = "student" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , )-> Tuple[PreTrainedModel, List[int], List[int]]:
"""simple docstring"""
_UpperCAmelCase = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase ) # purely for convenience
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).eval()
else:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), F"""teacher must be a model or string got type {type(__lowerCAmelCase )}"""
_UpperCAmelCase = teacher.config.to_diff_dict()
try:
_UpperCAmelCase , _UpperCAmelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_UpperCAmelCase = teacher_e
if d is None:
_UpperCAmelCase = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
_UpperCAmelCase , _UpperCAmelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_UpperCAmelCase , _UpperCAmelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_UpperCAmelCase = teacher_e
if d is None:
_UpperCAmelCase = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowerCAmelCase )
# Copy weights
_UpperCAmelCase = teacher.config_class(**__lowerCAmelCase )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_config(__lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_UpperCAmelCase = student.load_state_dict(teacher.state_dict() , strict=__lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_UpperCAmelCase , _UpperCAmelCase = list(range(__lowerCAmelCase ) ), list(range(__lowerCAmelCase ) )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
F""" {save_path}""" )
student.save_pretrained(__lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_UpperCAmelCase = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
if d_layers_to_copy is None:
_UpperCAmelCase = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
try:
if hasattr(
__lowerCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowerCAmelCase )
logger.info(
F"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
_UpperCAmelCase = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(__lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
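# Example invocation (added as a usage sketch; the script filename, model name
# and output directory are placeholders):
#   python make_student.py t5-small ./student_dir --e 2 --d 2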
import os
import sys
import transformers
SCREAMING_SNAKE_CASE__ = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password of length i that is guaranteed to contain `chars_incl`."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check that the password is long enough and mixes upper, lower, digit and special characters."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
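# Quick illustrative checks (added); the sample passwords are arbitrary.
assert is_strong_password("Hwea7$2!") is True
assert is_strong_password("Sh0r1") is False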
def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
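# Example invocation (added as a usage sketch; the script filename and the
# checkpoint path are placeholders):
#   python convert_vae_to_onnx.py --model_path ./stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14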
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
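    # Added sanity checks: the four non-trivial digit-cancelling fractions are
    # 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so the
    # expected answer is 100.
    assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
    assert solution() == 100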
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
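# Minimal usage sketch (added; the argument values and the tokenizer object are
# illustrative placeholders):
#   training_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   dataset = GlueDataset(training_args, tokenizer=some_pretrained_tokenizer)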
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """
    Holds a DeepSpeed configuration dictionary and can be quickly queried for things like the ZeRO stage.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )
        self.config = config

        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
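# Usage sketch (added): querying a minimal, hypothetical ZeRO-3 config.
#   cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3}})
#   cfg.is_zero3()   -> True
#   cfg.is_offload() -> False (no offload devices configured)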
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
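# Usage sketch (added; `spark_df` is a placeholder pyspark DataFrame):
#   dataset = SparkDatasetReader(spark_df, streaming=False, cache_dir="./cache").read()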
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
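# Illustrative behaviour (added), assuming none of these variables are set:
#   get_int_from_env(["LOCAL_RANK", "RANK"], 0)    -> 0 (falls back to the default)
#   parse_flag_from_env("SOME_HYPOTHETICAL_FLAG")  -> False
#   parse_choice_from_env("SOME_HYPOTHETICAL_KEY") -> "no"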
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
os.makedirs(F"{class_data_dir}/images" , exist_ok=__UpperCamelCase )
if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
return
while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
with open(F"{class_data_dir}/caption.txt" , """w""" ) as fa, open(F"{class_data_dir}/urls.txt" , """w""" ) as fa, open(
F"{class_data_dir}/images.txt" , """w""" ) as fa:
while total < num_class_images:
UpperCamelCase = class_images[count]
count += 1
try:
UpperCamelCase = requests.get(images["""url"""] )
if img.status_code == 200:
UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(F"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F"{class_data_dir}/images/{total}.jpg" + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
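# Worked example (added): build a small menu and take a greedy selection by
# value; the menu entries are illustrative only.
if __name__ == "__main__":
    names = ["Burger", "Pizza", "Coca Cola"]
    values = [80, 100, 60]
    weights = [40, 60, 40]
    foods = build_menu(names, values, weights)
    # With a budget of 60 the highest-value item (Pizza) fills it exactly.
    taken, total_value = greedy(foods, 60, Things.get_value)
    print(taken, total_value)  # [Things(Pizza, 100, 60)] 100.0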
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Returns the next number in the chain: the sum of the squares of the digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Returns how many starting numbers below `number` produce a chain that arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : List[str] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
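# Usage sketch (added): building a config and its ONNX counterpart.
#   config = DebertaV2Config()
#   onnx_config = DebertaV2OnnxConfig(config)
#   list(onnx_config.inputs)  -> ["input_ids", "attention_mask"] (type_vocab_size defaults to 0)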
from __future__ import annotations
snake_case : Optional[int] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
snake_case : int = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to the five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler:
    # count player 1 wins over the hands in poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
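# These tests are meant to be run with pytest (e.g. `python -m pytest` in this
# directory); the Project Euler check above additionally expects a
# `poker_hands.txt` data file to sit next to this module.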
| 298 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # each search aims at the frontier node of the opposite direction
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
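# Note on the heuristics: on this 4-connected grid every move costs 1, so the
# manhattan distance (HEURISTIC = 1) equals the obstacle-free move count and
# the euclidean distance (HEURISTIC = 0) is never larger than it; both are
# admissible, so A* returns an optimal path in either mode.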
| 309 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
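# Note: the module body above only registers names in `_import_structure`; at
# import time `sys.modules[__name__]` is swapped for a `_LazyModule`, so the
# heavy torch/TF submodules are imported only on first attribute access, e.g.
# `from transformers.models.mobilebert import MobileBertModel`.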
| 355 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an EfficientFormer model."""

    model_type = "efficientformer"

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
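# A minimal usage sketch (hypothetical output directory; the defaults appear to
# correspond to the l1 checkpoint referenced in the archive map above):
#
#   config = EfficientFormerConfig()
#   config.save_pretrained("./efficientformer-l1")  # serializes to config.json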
| 176 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
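# A minimal usage sketch (hypothetical image path; assumes PIL and the agents
# runtime are installed, and that the tool downloads its checkpoint lazily):
#
#   from PIL import Image
#   captioner = ImageCaptioningTool()
#   print(captioner(Image.open("photo.jpg")))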
| 236 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
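# Reproducibility note: `enable_full_determinism()` plus the seeded generators
# pin the sampled noise, so the hard-coded slices should match across runs on
# the same backend; the 1e-2 tolerance absorbs small kernel-level drift.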
| 296 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
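# The three simulated failure modes exercised above are: requests that hang
# until a timeout fires (CONNECTION_TIMES_OUT), requests that fail immediately
# with a connection error (CONNECTION_FAILS), and the HF_DATASETS_OFFLINE=1
# environment switch (HF_DATASETS_OFFLINE_SET_TO_1).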
| 365 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
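# The from_string and from_readme cases above mirror each other: the same
# README fixtures are validated once from an in-memory string and once from a
# temporary README.md on disk, with `path` interpolated into the expected error.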
| 75 | 0 |