code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()

class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64),
            class_embed_type="simple_projection", projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)

@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2

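# To run just the fast tests above from a diffusers checkout (the path below is
# the conventional location for this file and may differ in your tree):
#   python -m pytest tests/pipelines/audioldm/test_audioldm.py -k FastTests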
| 36 |
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
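# A quick sanity check of the bitwise walk above: 25 = 0b11001 and 32 = 0b100000
# share no set bits, so every digit of the zero-padded result is zero.
#   binary_and(25, 32)  # -> '0b000000'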
| 98 | 0 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial given its coefficients from lowest to highest degree."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method, using O(n) multiplications."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
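# Both evaluators agree: with poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0 the
# value is 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79800.0, so the two print
# statements above emit the same number.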
| 351 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
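# Downstream code resolves schedulers through this guarded module; a minimal
# sketch of its effect (assuming torch is installed, so the first branch
# above succeeded and real classes, not dummies, were exported):
#
#   from diffusers.schedulers import DDIMScheduler
#   scheduler = DDIMScheduler(beta_schedule="scaled_linear")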
| 155 | 0 |
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
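# Runtime behavior of the shim, as a sketch: instantiating the deprecated class
# warns, then delegates everything to PerceiverImageProcessor.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       extractor = PerceiverFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)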
| 63 |
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
return " ".join(
"".join(word[::-1] ) if len(UpperCAmelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw')) | 243 | 0 |
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Return the present value of a series of yearly cash flows, rounded to cents."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
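# Quick check of the discounting formula (the year-0 cash flow is undiscounted):
#   present_value(0.1, [100, 100])  # -> 190.91, i.e. 100 + 100 / 1.1
#   present_value(0.0, [100, 100])  # -> 200.0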
| 308 |
import os


def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum in the matrix, moving only right, up and down,
    from any cell in the leftmost column to any cell in the rightmost column.

    Project Euler problem 82: https://projecteuler.net/problem=82
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # The leftmost column is its own minimal path sum.
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    # Sweep columns left to right, relaxing each column three times:
    # moves from the left, then downward moves, then upward moves.
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
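# The three relaxation passes on one worked column, assuming the tiny matrix
# [[1, 9], [5, 1]]: column 0 starts as [1, 5]; moving right gives [10, 6];
# the down/up passes cannot improve either cell, so the answer is
# min(10, 6) = 6 (enter at the 5, step right onto the 1).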
| 308 | 1 |
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the range [0, 1] using min-max normalization."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
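# Quick check of min-max scaling (the endpoints map to 0 and 1):
#   normalization([0, 5, 10])  # -> [0.0, 0.5, 1.0]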
def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit (sample) standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
| 112 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


logger = logging.getLogger(__name__)

class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dump a decoded batch to json for inspection."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False,
                num_workers=self.num_workers, sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle,
                num_workers=self.num_workers, sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        return self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length", default=56, type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser

class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)

def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric, args.save_top_k, lower_is_better),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model

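# Hypothetical invocation; the paths and hyper-parameters below are purely
# illustrative, and the full flag set comes from `lightning_base` plus
# `add_model_specific_args` above:
#
#   python finetune.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#       --output_dir ./out --do_train --do_predict --n_val 500 --val_metric rouge2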
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
| 112 | 1 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and the columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))

def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array, via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)

def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives via binary search, shrinking the search bound row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every cell."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives per row, stopping at the first negative in each sorted row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
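# All three strategies agree on the first test grid above, which contains
# exactly eight negative numbers:
#   count_negatives_binary_search(test_grids[0])           # -> 8
#   count_negatives_brute_force(test_grids[0])             # -> 8
#   count_negatives_brute_force_with_break(test_grids[0])  # -> 8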
def benchmark() -> None:
    """Benchmark the three counting strategies on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 22 |
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)

def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
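# Quick sanity check (inputs must be NumPy arrays so that `.mean()` works):
#   import numpy as np
#   acc_and_f1(np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1]))
#   # -> {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}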
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)

def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
| 22 | 1 |
import os


def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top left to the bottom right of the
    grid in the given file, moving only right and down.

    Project Euler problem 81: https://projecteuler.net/problem=81
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    # The first row and column can only be reached by moving right or down, respectively.
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
| 3 |
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt.

    Project Euler problem 13: https://projecteuler.net/problem=13
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
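# The same trick on inline data (illustrative numbers): Python's
# arbitrary-precision integers carry the sum exactly, and the string slice
# keeps only the first ten digits of the total.
#   numbers = ["530432", "910238", "223974"]
#   str(sum(int(n) for n in numbers))[:10]  # -> '1664644'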
| 3 | 1 |
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]

class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
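# Minimal shape check for ResNetConvLayer (a sketch; requires a torch install):
#   layer = ResNetConvLayer(3, 16, kernel_size=3, stride=2)
#   layer(torch.randn(1, 3, 64, 64)).shape  # -> torch.Size([1, 16, 32, 32])
# The kernel_size // 2 padding preserves spatial size before the stride divides it.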
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding

class ResNetShortCut(nn.Module):
    """ResNet shortcut: a 1x1 convolution used to project the residual to the
    correct size, and optionally downsample the input with `stride=2`."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state

class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer, composed of two 3x3 convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state

class ResNetBottleNeckLayer(nn.Module):
    """A ResNet bottleneck layer: the first 1x1 convolution reduces the channels
    by `reduction`, the 3x3 convolution does the spatial work, and the last 1x1
    convolution restores the output width."""

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state

class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state

class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )

class ResNetPreTrainedModel(PreTrainedModel):
    """An abstract class to handle weights initialization and a simple interface
    for downloading and loading pretrained models."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , lowerCAmelCase_ , )
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = config
UpperCamelCase_ = ResNetEmbeddings(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = ResNetEncoder(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Tensor , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
UpperCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase_ = self.embedder(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.encoder(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = encoder_outputs[0]
UpperCamelCase_ = self.pooler(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , pooler_output=_SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowerCAmelCase_ , )
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: Any , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Dict:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = config.num_labels
UpperCamelCase_ = ResNetModel(_SCREAMING_SNAKE_CASE )
# classification head
UpperCamelCase_ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Optional[torch.FloatTensor] = None , _SCREAMING_SNAKE_CASE: Optional[torch.LongTensor] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
UpperCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase_ = self.resnet(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase_ = self.classifier(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase_ = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase_ = "single_label_classification"
else:
UpperCamelCase_ = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCamelCase_ = MSELoss()
if self.num_labels == 1:
UpperCamelCase_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase_ = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase_ = CrossEntropyLoss()
UpperCamelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase_ = BCEWithLogitsLoss()
UpperCamelCase_ = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not return_dict:
UpperCamelCase_ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , lowerCAmelCase_ , )
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] ) -> int:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE )
super()._init_backbone(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [config.embedding_size] + config.hidden_sizes
UpperCamelCase_ = ResNetEmbeddings(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = ResNetEncoder(_SCREAMING_SNAKE_CASE )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
@replace_return_docstrings(output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Tensor , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None ) -> BackboneOutput:
"""simple docstring"""
UpperCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase_ = self.embedder(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.encoder(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = outputs.hidden_states
UpperCamelCase_ = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
UpperCamelCase_ = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=_SCREAMING_SNAKE_CASE , )
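

# A hedged smoke test for the model above: random pixels through an untrained
# default-config ResNet, so no checkpoint download is needed. The expected
# shapes assume the default ResNetConfig (hidden sizes ending in 2048).
if __name__ == "__main__":
    import torch

    model = ResNetModel(ResNetConfig())
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    print(out.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])
    print(out.pooler_output.shape)      # torch.Size([1, 2048, 1, 1])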
| 328 |
def rank_of_matrix(matrix) -> int:
    """Compute the rank of a matrix by Gaussian elimination (reduces `matrix` in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below the diagonal to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                # No pivot in this column: move the last counted column into
                # its place and shrink the rank
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # stay on the same row: a swap or a column move must be re-checked
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
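    # Quick usage check (the routine reduces its input matrix in place):
    print(rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]))  # 1 -- rows are dependent
    print(rank_of_matrix([[1.0, 0.0], [0.0, 1.0]]))  # 2 -- full rank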
| 328 | 1 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowercase( __a ):
'''simple docstring'''
def __init__( self: str, a_: str = "▁", a_: bool = True, a_: Union[str, AddedToken] = "<unk>", a_: Union[str, AddedToken] = "</s>", a_: Union[str, AddedToken] = "<pad>", ):
'''simple docstring'''
_snake_case : Optional[Any] = {
"""pad""": {"""id""": 0, """token""": pad_token},
"""eos""": {"""id""": 1, """token""": eos_token},
"""unk""": {"""id""": 2, """token""": unk_token},
}
_snake_case : List[str] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
_snake_case : Optional[Any] = token_dict["""token"""]
_snake_case : Tuple = Tokenizer(Unigram() )
_snake_case : int = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(""" {2,}""" ), """ """ ),
normalizers.Lowercase(),
] )
_snake_case : Any = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=a_, add_prefix_space=a_ ),
pre_tokenizers.Digits(individual_digits=a_ ),
pre_tokenizers.Punctuation(),
] )
_snake_case : Any = decoders.Metaspace(replacement=a_, add_prefix_space=a_ )
_snake_case : List[Any] = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])], )
_snake_case : int = {
"""model""": """SentencePieceUnigram""",
"""replacement""": replacement,
"""add_prefix_space""": add_prefix_space,
}
super().__init__(a_, a_ )
def UpperCamelCase_ ( self: Optional[Any], a_: Union[str, List[str]], a_: int = 8_000, a_: bool = True, ):
'''simple docstring'''
_snake_case : Dict = trainers.UnigramTrainer(
vocab_size=a_, special_tokens=self.special_tokens_list, show_progress=a_, )
if isinstance(a_, a_ ):
_snake_case : str = [files]
self._tokenizer.train(a_, trainer=a_ )
self.add_unk_id()
def UpperCamelCase_ ( self: str, a_: Union[Iterator[str], Iterator[Iterator[str]]], a_: int = 8_000, a_: bool = True, ):
'''simple docstring'''
_snake_case : Optional[int] = trainers.UnigramTrainer(
vocab_size=a_, special_tokens=self.special_tokens_list, show_progress=a_, )
self._tokenizer.train_from_iterator(a_, trainer=a_ )
self.add_unk_id()
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : int = json.loads(self._tokenizer.to_str() )
_snake_case : Tuple = self.special_tokens["""unk"""]["""id"""]
_snake_case : Dict = Tokenizer.from_str(json.dumps(a_ ) )
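

# A hedged usage sketch of the pieces the wrapper above assembles, using the
# `tokenizers` objects already imported at the top of this file (toy corpus
# and vocab size; the exact segmentation learned may vary):
if __name__ == "__main__":
    demo = Tokenizer(Unigram())
    demo.pre_tokenizer = pre_tokenizers.Metaspace(replacement="▁", add_prefix_space=True)
    trainer = trainers.UnigramTrainer(
        vocab_size=50, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
    )
    demo.train_from_iterator(["hello world", "tokenize this text"], trainer=trainer)
    print(demo.encode("hello world").tokens)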
| 64 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
a = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
a = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions() -> list:
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda release: version.Version(release))


def init_hf_modules() -> None:
    # This function has already been executed if HF_MODULES_CACHE is in sys.path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]) -> None:
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file) -> list:
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def lowercase (snake_case__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase = False
lowerCAmelCase = [module_file]
lowerCAmelCase = []
# Let's recurse through all relative imports
while not no_change:
lowerCAmelCase = []
for f in files_to_check:
new_imports.extend(get_relative_imports(snake_case__ ) )
lowerCAmelCase = Path(snake_case__ ).parent
lowerCAmelCase = [str(module_path / m ) for m in new_imports]
lowerCAmelCase = [f for f in new_import_files if f not in all_relative_imports]
lowerCAmelCase = [f'''{f}.py''' for f in new_import_files]
lowerCAmelCase = len(snake_case__ ) == 0
all_relative_imports.extend(snake_case__ )
return all_relative_imports
def lowercase (snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
with open(snake_case__ , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase = f.read()
# Imports of the form `import xxx`
lowerCAmelCase = re.findall("""^\s*import\s+(\S+)\s*$""" , snake_case__ , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("""^\s*from\s+(\S+)\s+import""" , snake_case__ , flags=re.MULTILINE )
# Only keep the top-level module
lowerCAmelCase = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" )]
# Unique-ify and test we got them all
lowerCAmelCase = list(set(snake_case__ ) )
lowerCAmelCase = []
for imp in imports:
try:
importlib.import_module(snake_case__ )
except ImportError:
missing_packages.append(snake_case__ )
if len(snake_case__ ) > 0:
raise ImportError(
"""This modeling file requires the following packages that were not found in your environment: """
f'''{', '.join(snake_case__ )}. Run `pip install {' '.join(snake_case__ )}`''' )
return get_relative_imports(snake_case__ )
def lowercase (snake_case__ : Any , snake_case__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase = module_path.replace(os.path.sep , """.""" )
lowerCAmelCase = importlib.import_module(snake_case__ )
if class_name is None:
return find_pipeline_class(snake_case__ )
return getattr(snake_case__ , snake_case__ )
def lowercase (snake_case__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
from ..pipelines import DiffusionPipeline
lowerCAmelCase = dict(inspect.getmembers(snake_case__ , inspect.isclass ) )
lowerCAmelCase = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , snake_case__ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
f''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
f''' {loaded_module}.''' )
lowerCAmelCase = cls
return pipeline_class
def lowercase (snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , ) -> int:
'''simple docstring'''
lowerCAmelCase = str(snake_case__ )
lowerCAmelCase = os.path.join(snake_case__ , snake_case__ )
if os.path.isfile(snake_case__ ):
lowerCAmelCase = module_file_or_url
lowerCAmelCase = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
lowerCAmelCase = get_diffusers_versions()
# cut ".dev0"
lowerCAmelCase = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
lowerCAmelCase = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(f'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
lowerCAmelCase = f'''v{revision}'''
elif revision == "main":
lowerCAmelCase = revision
else:
raise ValueError(
f'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
f''' {', '.join(available_versions + ['main'] )}.''' )
# community pipeline on GitHub
lowerCAmelCase = COMMUNITY_PIPELINES_URL.format(revision=snake_case__ , pipeline=snake_case__ )
try:
lowerCAmelCase = cached_download(
snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , )
lowerCAmelCase = """git"""
lowerCAmelCase = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
lowerCAmelCase = hf_hub_download(
snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , )
lowerCAmelCase = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(f'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
lowerCAmelCase = check_imports(snake_case__ )
# Now we move the module inside our cached dynamic modules.
lowerCAmelCase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(snake_case__ )
lowerCAmelCase = Path(snake_case__ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(snake_case__ , submodule_path / module_file )
for module_needed in modules_needed:
lowerCAmelCase = f'''{module_needed}.py'''
shutil.copy(os.path.join(snake_case__ , snake_case__ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase = use_auth_token
elif use_auth_token is True:
lowerCAmelCase = HfFolder.get_token()
else:
lowerCAmelCase = None
lowerCAmelCase = model_info(snake_case__ , revision=snake_case__ , token=snake_case__ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowerCAmelCase = submodule_path / commit_hash
lowerCAmelCase = full_submodule + os.path.sep + commit_hash
create_dynamic_module(snake_case__ )
if not (submodule_path / module_file).exists():
shutil.copy(snake_case__ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
snake_case__ , f'''{module_needed}.py''' , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , )
return os.path.join(snake_case__ , snake_case__ )
def lowercase (snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[str] = None , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , **snake_case__ : Any , ) -> int:
'''simple docstring'''
lowerCAmelCase = get_cached_module_file(
snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , )
return get_class_in_module(snake_case__ , final_module.replace(""".py""" , """""" ) )
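

# Quick standalone check of the relative-import scan above (a temp file with
# illustrative module content; no network access needed):
if __name__ == "__main__":
    import tempfile

    source = "import os\nfrom .pipeline_utils import DiffusionPipeline\n"
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(source)
    print(get_relative_imports(tmp.name))  # ['pipeline_utils']
    os.remove(tmp.name)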
| 155 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Union[str, Any] = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
__UpperCamelCase : Tuple = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
__UpperCamelCase : Any = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
__UpperCamelCase : Union[str, Any] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
__UpperCamelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
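

# The `_LazyModule` above defers each heavy submodule import until the first
# attribute access. A minimal standalone sketch of that idea (the mapping and
# class name here are illustrative, not the real `_LazyModule` internals):
import importlib
import types


class _SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)  # resolved only on first access


if __name__ == "__main__":
    lazy = _SketchLazyModule("lazy_demo", {"math": ["sqrt", "pi"]})
    print(lazy.sqrt(9.0))  # "math" is imported here, not at construction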
| 355 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Dict = '''▁'''
__UpperCamelCase : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
__UpperCamelCase : str = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
__UpperCamelCase : Optional[Any] = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
__UpperCamelCase : str = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class a ( a__ ):
snake_case__ = ["input_ids"]
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = RESOURCE_FILES_NAMES
def __init__( self , _snake_case , _snake_case=None , _snake_case=False , _snake_case="utf8" , _snake_case="[UNK]" , _snake_case="[SEP]" , _snake_case="[PAD]" , _snake_case="[CLS]" , _snake_case="[MASK]" , _snake_case = None , **_snake_case , ):
"""simple docstring"""
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , vocab_file=_snake_case , encoding=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
lowerCAmelCase = do_lower_case
lowerCAmelCase = sentencepiece_model_ckpt
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase = self.load_vocab(filepath=_snake_case )
else:
lowerCAmelCase = {self.sp_model.id_to_piece(_snake_case ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase = {v: k for k, v in self.vocab.items()}
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase = self.tokenize(_snake_case )
lowerCAmelCase ,lowerCAmelCase = '', []
for i, ch in enumerate(_snake_case ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase = self.SP_CHAR_MAPPING.get(_snake_case )
else:
lowerCAmelCase = unicodedata.normalize('NFKC' , _snake_case )
if self.is_whitespace(_snake_case ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_snake_case ) )
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase = token[1:]
lowerCAmelCase = text[offset:].index(_snake_case ) + offset
lowerCAmelCase = start + len(_snake_case )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase = end
return token_mapping
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(_snake_case , _snake_case ) for c in text) )
def UpperCamelCase__ ( self , _snake_case , _snake_case=False , _snake_case=64 , _snake_case=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get('enable_sampling' ) is True:
lowerCAmelCase = True
if self.sp_model_kwargs.get('alpha' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
lowerCAmelCase = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
lowerCAmelCase = self.sp_model.EncodeAsPieces(_snake_case )
else:
lowerCAmelCase = self.sp_model.SampleEncodeAsPieces(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = []
for pi, piece in enumerate(_snake_case ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_snake_case ) and pi != 0:
new_pieces.append(_snake_case )
continue
else:
continue
lowerCAmelCase = 0
for i, chunk in enumerate(_snake_case ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_snake_case ) or self.is_punct(_snake_case ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_snake_case )
lowerCAmelCase = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase = i
if len(_snake_case ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = self.convert_ids_to_tokens(_snake_case )
lowerCAmelCase = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip()
return out_string
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
return self.reverse_vocab.get(_snake_case , self.unk_token )
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCamelCase__ ( self , _snake_case , _snake_case=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCamelCase__ ( self , _snake_case , _snake_case=None , _snake_case=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1, 1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def UpperCamelCase__ ( self , _snake_case , _snake_case = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_snake_case ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_snake_case ) + 1) + [1] * (len(_snake_case ) + 3)
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_snake_case ) == 1:
lowerCAmelCase = unicodedata.category(_snake_case )
if cat == "Zs":
return True
return False
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = {}
with io.open(_snake_case , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_snake_case ):
lowerCAmelCase = line.rstrip('\n' )
lowerCAmelCase = int(_snake_case )
return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
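

# The ErnieM layout above is [CLS] A [SEP] for one sequence and
# [CLS] A [SEP] [SEP] B [SEP] for a pair. A tiny standalone sketch of the
# matching token-type-id computation (the helper name is illustrative):
def _sketch_token_type_ids(len_a, len_b=None):
    if len_b is None:
        return [0] * (len_a + 2)                       # [CLS] A [SEP]
    return [0] * (len_a + 1) + [1] * (len_b + 3)       # [CLS] A [SEP] [SEP] B [SEP]


assert _sketch_token_type_ids(3) == [0, 0, 0, 0, 0]
assert _sketch_token_type_ids(2, 2) == [0, 0, 0, 1, 1, 1, 1, 1]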
| 309 | 0 |
def counting_sort(collection):
    """Stable counting sort: O(n + k) for n items whose values span a range of size k."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of a string with counting sort."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
| 308 |
def sum_of_digits(n: int) -> int:
    """Iterative digit sum of |n|."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum of |n|."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """One-liner digit sum via the decimal string of |n|."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on integers of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 308 | 1 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Return the numerator of the largest fraction strictly below
    numerator/denominator whose denominator does not exceed `limit`
    (Project Euler problem 71)."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # compare current_numerator/current_denominator with the best so far
        # by cross-multiplication to stay in exact integer arithmetic
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
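    # Hedged cross-check against exact fractions for a small limit (this
    # verification is not part of the original solution):
    from fractions import Fraction

    best = Fraction(0, 1)
    for q in range(1, 101):
        p = 3 * q // 7 - (1 if q % 7 == 0 else 0)
        best = max(best, Fraction(p, q))
    assert best == Fraction(41, 96)
    assert solution(limit=100) == 41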
| 360 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
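

# `TYPE_CHECKING` is True only under static type checkers, so the imports in
# that branch above cost nothing at runtime. A tiny standalone sketch of the
# pattern (the function and import here are illustrative):
from typing import TYPE_CHECKING as _SKETCH_TYPE_CHECKING

if _SKETCH_TYPE_CHECKING:
    from decimal import Decimal  # visible to mypy, never imported at runtime


def _sketch_as_float(value: "Decimal") -> float:  # string annotation defers lookup
    return float(value)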
| 192 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__SCREAMING_SNAKE_CASE :List[str] = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__SCREAMING_SNAKE_CASE :Optional[Any] = {'''allegro/herbert-base-cased''': 514}
__SCREAMING_SNAKE_CASE :Optional[int] = {}
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Tuple = HerbertTokenizer
def __init__( self : Dict , snake_case_ : Union[str, Any]=None , snake_case_ : Any=None , snake_case_ : Dict=None , snake_case_ : List[Any]="<s>" , snake_case_ : Tuple="<unk>" , snake_case_ : Dict="<pad>" , snake_case_ : List[str]="<mask>" , snake_case_ : int="</s>" , **snake_case_ : str , ):
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , sep_token=snake_case_ , **snake_case_ , )
def lowercase ( self : str , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase ( self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def lowercase ( self : Any , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : List[Any] , snake_case_ : str , snake_case_ : Optional[str] = None ):
_UpperCAmelCase = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
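

# The special-tokens mask above flags <s>/</s> slots with 1 and sequence
# tokens with 0; a tiny standalone sketch of the two layouts (helper name is
# illustrative):
def _sketch_special_tokens_mask(len_a, len_b=None):
    if len_b is None:
        return [1] + [0] * len_a + [1]                  # <s> A </s>
    return [1] + [0] * len_a + [1] + [0] * len_b + [1]  # <s> A </s> B </s>


assert _sketch_special_tokens_mask(3) == [1, 0, 0, 0, 1]
assert _sketch_special_tokens_mask(2, 2) == [1, 0, 0, 1, 0, 0, 1]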
| 22 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Dict , __lowercase : str , __lowercase : Optional[Any] , __lowercase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = TapasConfig.from_json_file(__lowercase )
# set absolute/relative position embeddings parameter
_UpperCAmelCase = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = True
# hparam_utils.py hparams
_UpperCAmelCase = 0.66_4694
_UpperCAmelCase = 0.20_7951
_UpperCAmelCase = 0.12_1194
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = 0.035_2513
_UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase = 4
_UpperCAmelCase = False
# hparam_utils.py hparams
_UpperCAmelCase = 36.4519
_UpperCAmelCase = 0.90_3421
_UpperCAmelCase = 222.088
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 0.76_3141
_UpperCAmelCase = TapasForQuestionAnswering(config=__lowercase )
elif task == "TABFACT":
_UpperCAmelCase = TapasForSequenceClassification(config=__lowercase )
elif task == "MLM":
_UpperCAmelCase = TapasForMaskedLM(config=__lowercase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase = TapasModel(config=__lowercase )
else:
raise ValueError(f'Task {task} not supported.' )
print(f'Building PyTorch model from configuration: {config}' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__lowercase , __lowercase , __lowercase )
# Save pytorch-model (weights and configuration)
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__lowercase )
# Save tokenizer files
print(f'Save tokenizer files to {pytorch_dump_path}' )
_UpperCAmelCase = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(__lowercase )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
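    # The tokenizer above derives its vocab path by slicing the 10-character
    # "model.ckpt" suffix off the TF checkpoint path; a quick standalone check
    # of that arithmetic (the path is hypothetical):
    _demo_ckpt = "/tmp/tapas_sqa/model.ckpt"
    assert len("model.ckpt") == 10
    assert _demo_ckpt[:-10] + "vocab.txt" == "/tmp/tapas_sqa/vocab.txt"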
| 22 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
"""simple docstring"""
a : Union[str, Any] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
a : Optional[int] = Dataset.from_dict(snake_case )
return dataset
class UpperCamelCase ( a_ ):
"""simple docstring"""
    def test_make_duplicate_clusters(self):
        """simple docstring"""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        """simple docstring"""
        ds = get_dataset()
        ds_dedup, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_dedup), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 345 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
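

# A standalone sketch of the multi-vector expansion implemented above: one
# placeholder pseudo-word becomes N indexed sub-tokens before tokenization
# (the helper name is illustrative):
def _sketch_expand_placeholder(text, placeholder, num_vec):
    expanded = " ".join(f"{placeholder}_{i}" for i in range(num_vec))
    return text.replace(placeholder, expanded)


assert _sketch_expand_placeholder("a photo of <cat>", "<cat>", 3) == "a photo of <cat>_0 <cat>_1 <cat>_2"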
| 345 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
_overwrite_items = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
_delete_items = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
_access_absent_items = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
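

# The operation tuples above lean on `operator.getitem/setitem/delitem` being
# plain functions that take the container as their first argument; a quick
# standalone check of that calling convention:
def test_operator_calling_convention():
    d = {}
    setitem(d, "key", "val")       # equivalent to d["key"] = "val"
    assert getitem(d, "key") == "val"
    delitem(d, "key")              # equivalent to del d["key"]
    assert d == {}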
| 328 |
import math


def perfect_square(num: int) -> bool:
    """Check via floating-point sqrt (fast, but inexact for very large ints)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check via integer binary search on the square root (exact)."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
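    # Precision caveat (hedged illustration): float sqrt can misclassify very
    # large perfect squares, while math.isqrt stays exact (Python 3.8+).
    big = (10**8 + 1) ** 2
    print(math.sqrt(big) * math.sqrt(big) == big)  # may be False (float rounding)
    print(math.isqrt(big) ** 2 == big)             # True: exact integer arithmetic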
| 328 | 1 |
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
a__ : Optional[int] = 2_0_4_8
a__ : List[Any] = 4_0_9_6
a__ : Optional[int] = 4_2
a__ : Dict = os.environ.pop('PROCESS_TRAIN', 'false')
a__ : List[str] = {'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4}
def _lowercase ( __A ):
'''simple docstring'''
def choose_first(__A ,__A=False ):
assert isinstance(__A ,__A )
if len(__A ) == 1:
__UpperCamelCase = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
__UpperCamelCase = {k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
__UpperCamelCase = {"""id""": example["""id"""]}
__UpperCamelCase = example["""annotations"""]
__UpperCamelCase = annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
__UpperCamelCase = ["""yes"""] if 1 in yes_no_answer else ["""no"""]
__UpperCamelCase = __UpperCamelCase = []
__UpperCamelCase = __UpperCamelCase = []
__UpperCamelCase = ["""<cls>"""]
else:
__UpperCamelCase = ["""short"""]
__UpperCamelCase = choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
__UpperCamelCase = ["""long"""]
__UpperCamelCase = choose_first(annotation["""long_answer"""] ,is_long_answer=__A )
__UpperCamelCase = []
answer.update(__A )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
__UpperCamelCase = True
else:
__UpperCamelCase = False
__UpperCamelCase = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] ,__A ) for k in cols ):
raise ValueError("""Issue in ID""" ,example["""id"""] )
return answer
def _lowercase ( __A ,__A=False ):
'''simple docstring'''
__UpperCamelCase = _get_single_answer(__A )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
__UpperCamelCase = example["""document"""]["""tokens"""]
__UpperCamelCase = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(__A ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
__UpperCamelCase = ["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
__UpperCamelCase = example["""document"""]["""tokens"""]
__UpperCamelCase = answer["""start_token"""]
__UpperCamelCase = answer["""end_token"""]
__UpperCamelCase = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
__UpperCamelCase = """ """.join(context[start_token:end_token] )
# checking above code
if assertion:
__UpperCamelCase = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
__UpperCamelCase = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
__UpperCamelCase = """ """.join([old[i] for i in range(len(__A ) ) if not is_html[i]] )
if new != old:
print("""ID:""" ,example["""id"""] )
print("""New:""" ,__A ,end="""\n""" )
print("""Old:""" ,__A ,end="""\n\n""" )
return {
"context": " ".join(__A ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
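

# A standalone sketch of the index shift above: dropping HTML tokens moves an
# answer span left by the number of HTML tokens that precede it (helper name
# and toy data are illustrative):
def _sketch_shift_span(tokens, is_html, start, end):
    kept, new_start, new_end = [], start, end
    for i, token in enumerate(tokens):
        if is_html[i]:
            if start > i:
                new_start -= 1
            if end > i:
                new_end -= 1
        else:
            kept.append(token)
    return kept, new_start, new_end


_kept, _s, _e = _sketch_shift_span(["<p>", "a", "b", "</p>", "c"], [1, 0, 0, 1, 0], 4, 5)
assert (_kept, _s, _e) == (["a", "b", "c"], 2, 3)  # the span still points at "c"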
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2_048, max_length=4_096, assertion=True):
    """Tokenizes the (question, context) pair and maps the answer span into each stride."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2_048, max_length=4_096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples that have no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the null samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
a__ : Union[str, Any] = load_dataset('natural_questions')
a__ : int = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
a__ : int = data['train' if PROCESS_TRAIN == 'true' else 'validation']
a__ : str = {
'tokenizer': tokenizer,
'doc_stride': DOC_STRIDE,
'max_length': MAX_LENGTH,
'assertion': False,
}
a__ : Any = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
a__ : Tuple = data.remove_columns(['annotations', 'document', 'id', 'question'])
print(data)
np.random.seed(SEED)
a__ : Any = 'nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl'
save_to_disk(data, file_name=cache_file_name)
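# The script above relies on module-level names defined earlier in the original
# file, outside this excerpt. A plausible set of definitions, stated here only
# as an assumption for context:
#
#   import os
#   import jsonlines
#   import numpy as np
#   from tqdm import tqdm
#
#   PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
#   CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
#   DOC_STRIDE = 2048
#   MAX_LENGTH = 4096
#   SEED = 42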
| 243 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = None
# Automatically constructed
__SCREAMING_SNAKE_CASE = "dict"
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default='''Translation''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_)
def __call__( self ) -> Optional[Any]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __lowerCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
# Automatically constructed
__SCREAMING_SNAKE_CASE = "dict"
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default='''TranslationVariableLanguages''' , init=UpperCAmelCase_ , repr=UpperCAmelCase_)
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = sorted(set(self.languages ) ) if self.languages else None
__UpperCamelCase = len(self.languages ) if self.languages else None
def __call__( self ) -> Any:
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def __lowerCamelCase ( self , lowercase ) -> Any:
__UpperCamelCase = set(self.languages )
if self.languages and set(lowercase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(lowercase ) - lang_set ) )}) are not in valid set ({', '.join(lowercase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__UpperCamelCase = []
for lang, text in translation_dict.items():
if isinstance(lowercase , lowercase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__UpperCamelCase , __UpperCamelCase = zip(*sorted(lowercase ) )
return {"language": languages, "translation": translations}
def __lowerCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
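if __name__ == "__main__":
    # Usage sketch, assuming the dataclasses above mirror the public
    # `datasets.features.translation` module (those are the released names):
    from datasets.features import TranslationVariableLanguages

    feature = TranslationVariableLanguages(languages=["en", "fr"])
    print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
    # {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}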
| 243 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
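# Usage sketch: the lazy module above defers the heavy torch import until a
# symbol is first accessed, e.g. (instantiation with a default config shown):
#
#   from transformers import WavLMConfig, WavLMModel
#   model = WavLMModel(WavLMConfig())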
| 19 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
UpperCamelCase_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a_ (_a ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = ConvBertTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
_lowerCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
_lowerCAmelCase : List[str] = do_lower_case
_lowerCAmelCase : str = strip_accents
_lowerCAmelCase : List[Any] = tokenize_chinese_chars
_lowerCAmelCase : List[Any] = normalizer_class(**snake_case_ )
_lowerCAmelCase : str = do_lower_case
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Optional[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
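# Usage sketch, assuming the class above is `ConvBertTokenizerFast` (the
# checkpoint names come from the maps above; loading fetches the vocab):
#
#   from transformers import ConvBertTokenizerFast
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tok("Hello world").input_ids  # [CLS] ... [SEP], lower-cased by default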
| 309 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowerCAmelCase : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Optional[Any] = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[Any] = {
"""google/electra-small-generator""": 5_12,
"""google/electra-base-generator""": 5_12,
"""google/electra-large-generator""": 5_12,
"""google/electra-small-discriminator""": 5_12,
"""google/electra-base-discriminator""": 5_12,
"""google/electra-large-discriminator""": 5_12,
}
lowerCAmelCase : Dict = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_INIT_CONFIGURATION
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ElectraTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="[UNK]" , snake_case__="[SEP]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__=True , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
_lowerCAmelCase : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , snake_case__ ) != do_lower_case
or normalizer_state.get('strip_accents' , snake_case__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , snake_case__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Tuple = getattr(snake_case__ , normalizer_state.pop('type' ) )
_lowerCAmelCase : List[Any] = do_lower_case
_lowerCAmelCase : List[Any] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Optional[int] = normalizer_class(**snake_case__ )
_lowerCAmelCase : List[Any] = do_lower_case
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 25 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mobilenet_v2"
def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : List[Any] = depth_multiplier
_lowerCAmelCase : List[Any] = depth_divisible_by
_lowerCAmelCase : Optional[Any] = min_depth
_lowerCAmelCase : str = expand_ratio
_lowerCAmelCase : str = output_stride
_lowerCAmelCase : Any = first_layer_is_expansion
_lowerCAmelCase : int = finegrained_output
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = tf_padding
_lowerCAmelCase : Optional[int] = classifier_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : str = semantic_loss_ignore_index
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = version.parse("1.11" )
@property
def a ( self ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
return 1E-4
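if __name__ == "__main__":
    # Usage sketch, assuming the first class above is `MobileNetV2Config`
    # (instantiation is offline; the arguments shown are the defaults):
    from transformers import MobileNetV2Config

    config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
    print(config.model_type)  # mobilenet_v2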
| 25 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE = """BlipImageProcessor"""
_SCREAMING_SNAKE_CASE = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = False
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = self.image_processor
def __call__( self : Union[str, Any] , UpperCamelCase__ : ImageInput = None , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : int , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
UpperCamelCase = self.tokenizer
UpperCamelCase = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
return text_encoding
# add pixel_values
UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
if text is not None:
UpperCamelCase = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
else:
UpperCamelCase = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase__ )
return encoding_image_processor
def A ( self : Tuple , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Any ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Union[str, Any] , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
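# Usage sketch, assuming the class above is `BlipProcessor` (the checkpoint
# name is the released captioning model; loading downloads the tokenizer and
# image-processor files):
#
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   # -> pixel_values plus input_ids/attention_mask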
| 28 |
from math import factorial
def solution(n: int = 20) -> int:
    """Returns the number of paths through an n x n grid (Project Euler 15)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number.')
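# Worked check: the number of monotonic lattice paths through an n x n grid is
# the central binomial coefficient C(2n, n); for n = 20,
#   C(40, 20) = 40! / (20! * 20!) = 137846528820
# which is the value solution(20) prints above.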
| 192 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
snake_case__ : Union[str, Any] = False
try:
snake_case__ : Tuple = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class A_ :
def __init__(self :Dict , _UpperCamelCase :str = None , _UpperCamelCase :list = [] )-> Dict:
__A = 0
__A = choices
__A = prompt
if sys.platform == "win32":
__A = '''*'''
else:
__A = '''➔ '''
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :Tuple , _UpperCamelCase :str = "" )-> int:
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _UpperCamelCase )
else:
forceWrite(self.choices[index] , _UpperCamelCase )
def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :int )-> Optional[Any]:
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(_UpperCamelCase )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :Direction , _UpperCamelCase :int = 1 )-> str:
__A = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_UpperCamelCase )
move_cursor(_UpperCamelCase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def _lowerCAmelCase (self :List[str] )-> int:
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def _lowerCAmelCase (self :int )-> Any:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def _lowerCAmelCase (self :List[Any] )-> List[Any]:
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def _lowerCAmelCase (self :Any )-> Optional[Any]:
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(_UpperCamelCase )] for number in range(10 )] )
def _lowerCAmelCase (self :int )-> List[Any]:
__A = int(chr(self.current_selection ) )
__A = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _UpperCamelCase )
else:
return
else:
return
def _lowerCAmelCase (self :Dict , _UpperCamelCase :int = 0 )-> Tuple:
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
__A = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_UpperCamelCase )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
__A = int(builtins.input() )
except ValueError:
__A = default_choice
else:
__A = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(_UpperCamelCase , '''\n''' )
return choice
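# Usage sketch (the class above is accelerate's interactive `BulletMenu`; the
# `run` method name and signature are taken from that library and it needs a
# real terminal):
#
#   menu = BulletMenu("In which compute environment are you running?", ["This machine", "AWS"])
#   choice = menu.run(default_choice=0)  # returns the selected index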
| 364 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : List[Any] = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
snake_case__ : List[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
snake_case__ : Optional[Any] = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
snake_case__ : Optional[Any] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
snake_case__ : Tuple = OrderedDict(
[
        # Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
snake_case__ : Optional[int] = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
snake_case__ : List[Any] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
snake_case__ : List[str] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
snake_case__ : Tuple = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
snake_case__ : Dict = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
snake_case__ : Union[str, Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
snake_case__ : Optional[int] = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
snake_case__ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
snake_case__ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
snake_case__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
snake_case__ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
snake_case__ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
snake_case__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
snake_case__ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
snake_case__ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
snake_case__ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
snake_case__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
snake_case__ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_MAPPING
snake_case__ : List[Any] = auto_class_update(FlaxAutoModel)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case__ : List[Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case__ : int = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
snake_case__ : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
snake_case__ : Dict = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
snake_case__ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
snake_case__ : Union[str, Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
snake_case__ : List[str] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
snake_case__ : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A_ ( _BaseAutoModelClass ):
lowerCAmelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
snake_case__ : Optional[Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
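# Usage sketch (standard transformers auto API; the checkpoint is illustrative):
#
#   model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
#
# The auto class looks up the config type in the corresponding _LazyAutoMapping
# defined above and instantiates the matching Flax head class.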
| 250 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : Optional[int] = "data2vec-audio"
def __init__( self: str ,lowerCamelCase_: Optional[Any]=32 ,lowerCamelCase_: List[Any]=768 ,lowerCamelCase_: Optional[int]=12 ,lowerCamelCase_: List[Any]=12 ,lowerCamelCase_: Any=3072 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Tuple=0.1 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: Dict=0.1 ,lowerCamelCase_: Any=0.0 ,lowerCamelCase_: int=0.1 ,lowerCamelCase_: Any=0.1 ,lowerCamelCase_: Optional[Any]=0.0_2 ,lowerCamelCase_: List[str]=1e-5 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Tuple=(512, 512, 512, 512, 512, 512, 512) ,lowerCamelCase_: Tuple=(5, 2, 2, 2, 2, 2, 2) ,lowerCamelCase_: str=(10, 3, 3, 3, 3, 2, 2) ,lowerCamelCase_: List[str]=False ,lowerCamelCase_: List[Any]=16 ,lowerCamelCase_: Dict=19 ,lowerCamelCase_: List[str]=5 ,lowerCamelCase_: List[Any]=0.0_5 ,lowerCamelCase_: List[Any]=10 ,lowerCamelCase_: Union[str, Any]=2 ,lowerCamelCase_: int=0.0 ,lowerCamelCase_: Optional[int]=10 ,lowerCamelCase_: Tuple=0 ,lowerCamelCase_: Optional[Any]="sum" ,lowerCamelCase_: List[str]=False ,lowerCamelCase_: Tuple=False ,lowerCamelCase_: str=256 ,lowerCamelCase_: List[str]=(512, 512, 512, 512, 1500) ,lowerCamelCase_: List[Any]=(5, 3, 3, 1, 1) ,lowerCamelCase_: int=(1, 2, 3, 1, 1) ,lowerCamelCase_: Union[str, Any]=512 ,lowerCamelCase_: int=0 ,lowerCamelCase_: List[Any]=1 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Tuple=False ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: List[str]=2 ,lowerCamelCase_: Optional[int]=3 ,lowerCamelCase_: int=None ,**lowerCamelCase_: Optional[Any] ,) -> Any:
super().__init__(**lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ )
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : List[Any] = feat_extract_activation
UpperCAmelCase_ : List[str] = list(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = list(lowerCamelCase_ )
UpperCAmelCase_ : Any = list(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = conv_bias
UpperCAmelCase_ : str = num_conv_pos_embeddings
UpperCAmelCase_ : List[str] = num_conv_pos_embedding_groups
UpperCAmelCase_ : int = conv_pos_kernel_size
UpperCAmelCase_ : List[Any] = len(self.conv_dim )
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Optional[int] = hidden_dropout
UpperCAmelCase_ : Tuple = attention_dropout
UpperCAmelCase_ : Dict = activation_dropout
UpperCAmelCase_ : Dict = feat_proj_dropout
UpperCAmelCase_ : int = final_dropout
UpperCAmelCase_ : Any = layerdrop
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Dict = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Optional[int] = mask_time_prob
UpperCAmelCase_ : Optional[Any] = mask_time_length
UpperCAmelCase_ : str = mask_time_min_masks
UpperCAmelCase_ : List[str] = mask_feature_prob
UpperCAmelCase_ : Optional[int] = mask_feature_length
UpperCAmelCase_ : Dict = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ : Dict = ctc_loss_reduction
UpperCAmelCase_ : Dict = ctc_zero_infinity
# adapter
UpperCAmelCase_ : str = add_adapter
UpperCAmelCase_ : Tuple = adapter_kernel_size
UpperCAmelCase_ : int = adapter_stride
UpperCAmelCase_ : Tuple = num_adapter_layers
UpperCAmelCase_ : Optional[int] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : int = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Union[str, Any] = list(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = list(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = list(lowerCamelCase_ )
UpperCAmelCase_ : int = xvector_output_dim
@property
def A__ ( self: Any ) -> Tuple:
return math.prod(self.conv_stride )
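if __name__ == "__main__":
    # Usage sketch: with the default conv strides (5, 2, 2, 2, 2, 2, 2) the
    # feature extractor downsamples by 5 * 2**6 = 320, i.e. one encoder frame
    # per 320 audio samples (20 ms at 16 kHz). `inputs_to_logits_ratio` is the
    # released name of the property defined above.
    from transformers import Data2VecAudioConfig

    print(Data2VecAudioConfig().inputs_to_logits_ratio)  # 320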
| 345 |
def topological_sort(graph):
    """Kahn's algorithm: repeatedly pop zero-indegree vertices from a queue."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
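# For the sample graph above, one valid ordering is printed:
# [0, 1, 2, 3, 4, 5]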
| 345 | 1 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
lowerCAmelCase__ = 6_378_137.0
lowerCAmelCase__ = 6_356_752.314_245
lowerCAmelCase__ = 6_378_137
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = (AXIS_A - AXIS_B) / AXIS_A
UpperCamelCase = atan((1 - flattening) * tan(radians(_SCREAMING_SNAKE_CASE ) ) )
UpperCamelCase = atan((1 - flattening) * tan(radians(_SCREAMING_SNAKE_CASE ) ) )
UpperCamelCase = radians(_SCREAMING_SNAKE_CASE )
UpperCamelCase = radians(_SCREAMING_SNAKE_CASE )
# Equation
UpperCamelCase = sin((phi_a - phi_a) / 2 )
UpperCamelCase = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
UpperCamelCase = sqrt(sin_sq_phi + (cos(_SCREAMING_SNAKE_CASE ) * cos(_SCREAMING_SNAKE_CASE ) * sin_sq_lambda) )
return 2 * RADIUS * asin(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
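if __name__ == "__main__":
    # Usage sketch (coordinates are illustrative): downtown San Francisco to
    # Yosemite Valley, roughly 254 km along the great circle.
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters")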
| 357 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class _lowerCamelCase ( _lowercase ):
def __init__(self , **__a ) -> Optional[int]:
super().__init__(**__a )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__a )
def snake_case_ (self , **__a ) -> List[Any]:
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCamelCase = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
UpperCamelCase = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
UpperCamelCase = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
UpperCamelCase = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
UpperCamelCase = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCamelCase = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
UpperCamelCase = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
UpperCamelCase = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
UpperCamelCase = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
UpperCamelCase = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
UpperCamelCase = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
UpperCamelCase = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , __a , *__a , __a=None , __a=None , **__a ) -> str:
return super().__call__(__a , *__a , num_workers=__a , batch_size=__a , **__a )
def snake_case_ (self , __a , __a=64 , __a = 0 , __a = 5_12 / 15_00 , __a = 32 , __a = 1 , ) -> List[str]:
UpperCamelCase = load_image(__a )
UpperCamelCase = self.image_processor.size["longest_edge"]
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.generate_crop_boxes(
__a , __a , __a , __a , __a , __a )
UpperCamelCase = self.image_processor(images=__a , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
UpperCamelCase = self.get_inference_context()
with inference_context():
UpperCamelCase = self._ensure_tensor_on_device(__a , device=self.device )
UpperCamelCase = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
UpperCamelCase = image_embeddings
UpperCamelCase = grid_points.shape[1]
UpperCamelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , __a , __a ):
UpperCamelCase = grid_points[:, i : i + points_per_batch, :, :]
UpperCamelCase = input_labels[:, i : i + points_per_batch]
UpperCamelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case_ (self , __a , __a=0.88 , __a=0.95 , __a=0 , __a=1 , ) -> int:
UpperCamelCase = model_inputs.pop("input_boxes" )
UpperCamelCase = model_inputs.pop("is_last" )
UpperCamelCase = model_inputs.pop("original_sizes" ).tolist()
UpperCamelCase = model_inputs.pop("reshaped_input_sizes" ).tolist()
UpperCamelCase = self.model(**__a )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCamelCase = model_outputs["pred_masks"]
UpperCamelCase = self.image_processor.post_process_masks(
__a , __a , __a , __a , binarize=__a )
UpperCamelCase = model_outputs["iou_scores"]
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __a , __a , __a , __a , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case_ (self , __a , __a=False , __a=False , __a=0.7 , ) -> Optional[int]:
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
UpperCamelCase = torch.cat(__a )
UpperCamelCase = torch.cat(__a )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.post_process_for_mask_generation(
__a , __a , __a , __a )
UpperCamelCase = defaultdict(__a )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__a )
UpperCamelCase = {}
if output_rle_mask:
UpperCamelCase = rle_mask
if output_bboxes_mask:
UpperCamelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 244 | 0 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Tuple = """align_text_model"""
def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , **__UpperCAmelCase , ) ->List[str]:
super().__init__(**__UpperCAmelCase)
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_act
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = pad_token_id
@classmethod
def UpperCAmelCase__ ( cls , __UpperCAmelCase , **__UpperCAmelCase) ->"PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase)
a_ , a_ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase)
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type") == "align":
a_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[Any] = """align_vision_model"""
def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 6_00 , __UpperCAmelCase = 2.0 , __UpperCAmelCase = 3.1 , __UpperCAmelCase = 8 , __UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , __UpperCAmelCase = [32, 16, 24, 40, 80, 1_12, 1_92] , __UpperCAmelCase = [16, 24, 40, 80, 1_12, 1_92, 3_20] , __UpperCAmelCase = [] , __UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , __UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , __UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , __UpperCAmelCase = 0.25 , __UpperCAmelCase = "swish" , __UpperCAmelCase = 25_60 , __UpperCAmelCase = "mean" , __UpperCAmelCase = 0.02 , __UpperCAmelCase = 0.001 , __UpperCAmelCase = 0.99 , __UpperCAmelCase = 0.2 , **__UpperCAmelCase , ) ->str:
super().__init__(**__UpperCAmelCase)
a_ = num_channels
a_ = image_size
a_ = width_coefficient
a_ = depth_coefficient
a_ = depth_divisor
a_ = kernel_sizes
a_ = in_channels
a_ = out_channels
a_ = depthwise_padding
a_ = strides
a_ = num_block_repeats
a_ = expand_ratios
a_ = squeeze_expansion_ratio
a_ = hidden_act
a_ = hidden_dim
a_ = pooling_type
a_ = initializer_range
a_ = batch_norm_eps
a_ = batch_norm_momentum
a_ = drop_connect_rate
a_ = sum(__UpperCAmelCase) * 4
@classmethod
def UpperCAmelCase__ ( cls , __UpperCAmelCase , **__UpperCAmelCase) ->"PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase)
a_ , a_ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase)
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type") == "align":
a_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Dict = """align"""
a_ : Tuple = True
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=6_40 , __UpperCAmelCase=1.0 , __UpperCAmelCase=0.02 , **__UpperCAmelCase , ) ->List[Any]:
super().__init__(**__UpperCAmelCase)
if text_config is None:
a_ = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
if vision_config is None:
a_ = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
a_ = AlignTextConfig(**__UpperCAmelCase)
a_ = AlignVisionConfig(**__UpperCAmelCase)
a_ = projection_dim
a_ = temperature_init_value
a_ = initializer_range
@classmethod
def UpperCAmelCase__ ( cls , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase) ->List[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = copy.deepcopy(self.__dict__)
a_ = self.text_config.to_dict()
a_ = self.vision_config.to_dict()
a_ = self.__class__.model_type
        return output
| 243 |
"""simple docstring"""
import baseaa
def UpperCamelCase ( UpperCAmelCase ) ->bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode("utf-8" ) )
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
return baseaa.baadecode(UpperCAmelCase ).decode("utf-8" )
if __name__ == "__main__":
UpperCamelCase_ = 'Hello World!'
UpperCamelCase_ = baseaa_encode(test)
print(encoded)
UpperCamelCase_ = baseaa_decode(encoded)
print(decoded) | 243 | 1 |
'''simple docstring'''
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding the (i, j) cell
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
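if __name__ == "__main__":
    # Usage sketch: this grid contains 5 islands under 8-directional
    # connectivity.
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Graph(5, 5, grid).count_islands())  # 5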
| 91 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 91 | 1 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Tests `FeaturesManager.determine_framework` with explicit, local, and environment-derived frameworks."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 25 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    """Deterministic primality check using 6k +/- 1 trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class Test(unittest.TestCase):
    def test_primes(self):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        with self.assertRaises(AssertionError):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout: float = 0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", final_dropout: bool = False, ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.")
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None, ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype)
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.")
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim, )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    def __init__(self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
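# Shape sketch for the transformer block above (illustrative sizes, assuming the diffusers imports resolve):
#   block = BasicTransformerBlock(dim=32, num_attention_heads=4, attention_head_dim=8)
#   block(torch.randn(2, 64, 32)).shape  # torch.Size([2, 64, 32]) - residual connections keep the shape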
| 344 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR the ciphertext with a cycled key; XORing twice with the same key restores the plaintext."""
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-letter lowercase key and keep decodings made only of valid characters."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        decoded = try_key(ciphertext, key)
        if decoded is not None:
            possibles.append(decoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only candidate decodings containing `common_word`."""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Return the sum of ASCII values of the decrypted text."""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"{solution() = }")
| 344 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class for the original OpenAI GPT model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
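# Usage sketch (assumes the transformers package; OpenAIGPTModel consumes this config):
#   config = OpenAIGPTConfig(n_layer=6)
#   model = OpenAIGPTModel(config)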
| 68 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement: str) -> str:
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
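# Illustration (hypothetical input) of the one-line branch above:
#   sort_objects_in_import('_import_structure["models.bert"].extend(["Z", "A"])')
# returns '_import_structure["models.bert"].extend(["A", "Z"])'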
def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures += [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 250 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def snake_case_(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
if gpta_config_file == "":
_snake_case = GPTaConfig()
else:
_snake_case = GPTaConfig.from_json_file(_UpperCamelCase )
_snake_case = GPTaModel(_UpperCamelCase )
# Load weights from numpy
load_tf_weights_in_gpta(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
_snake_case = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
_snake_case = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__A = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
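# Example invocation (file names and paths are illustrative):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt --pytorch_dump_folder_path /tmp/gpt2-pytorch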
| 368 |
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative ints as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
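# Hand-checked spot tests for the function above (25 = 0b11001, 32 = 0b100000):
assert binary_or(25, 32) == "0b111001"
assert binary_or(1, 1) == "0b1"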
if __name__ == "__main__":
import doctest
doctest.testmod()
| 278 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 30 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 244 | 0 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8), "Stack".center(print_width), "Postfix".center(print_width), sep=" | ", )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8), ("".join(stack)).ljust(print_width), ("".join(post_fix)).ljust(print_width), sep=" | ", )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 90 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 90 | 1 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 91 |
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    """Number of lattice routes through an n x n grid: the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
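# Hand-checkable sanity test: a 2x2 grid has C(4, 2) = 6 lattice routes.
assert solution(2) == 6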
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
UpperCAmelCase_ : List[str] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 91 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError() | 359 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close the issue: 7 days of inactivity have passed since the bot's stale reminder.
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Leave a stale warning comment on inactive, non-exempted issues.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main() | 222 | 0 |
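# Deployment note (illustrative, not part of the original script): the code above only
# reads the GITHUB_TOKEN environment variable, so a scheduled job would run it as e.g.
#
#   GITHUB_TOKEN=<personal-access-token> python stale.py
#
# The script filename here is hypothetical; any token with permission to edit and
# comment on issues in the target repository works.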
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
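# Illustrative usage (not part of the original module; checkpoint names are the real
# Hub ids listed in PRETRAINED_VOCAB_FILES_MAP above). Source sequences are prefixed
# with the __src_lang__ token, target sequences with the __tgt_lang__ token:
#
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
# inputs = tokenizer("Hello world", return_tensors="pt")             # ids start with __en__
# labels = tokenizer(text_target="Salut lume", return_tensors="pt")  # ids start with __ro__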
| 344 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
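# Illustrative sketch (not part of the original file): `_LazyModule` defers the heavy
# submodule imports above until an attribute is first accessed. A minimal stand-alone
# analogue of the idea, assuming only the standard library:
import importlib
import types


class MinimalLazyModule(types.ModuleType):
    """Resolve exported names to their defining submodule on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {n: mod for mod, names in import_structure.items() for n in names}

    def __getattr__(self, attr):
        # only called when normal attribute lookup fails
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        return getattr(submodule, attr)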
| 344 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        return TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 360 |
"""simple docstring"""
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then sweep two pointers inward for each fixed first element."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
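# Quick sanity check of the two implementations above (illustrative addition, not in
# the original script): for arr = [1, 2, 4, 8, 16] and target = 13, both find 1 + 4 + 8.
assert triplet_sum1([1, 2, 4, 8, 16], 13) == (1, 4, 8)
assert triplet_sum2([1, 2, 4, 8, 16], 13) == (1, 4, 8)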
def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 68 | 0 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='''my_dataset''' )] )
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 33 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 278 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 43 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 43 | 1 |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
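# Illustrative usage of the fixture above (a sketch, not from the original conftest):
# the returned directory contains `__dummy_dataset1__.py`, so `load_dataset_builder`
# can resolve it as a local loading script. The assertion only assumes that `_info`
# populates the builder's features.
def test_dummy_dataset_script_dir(dataset_loading_script_dir):
    import datasets

    builder = datasets.load_dataset_builder(dataset_loading_script_dir)
    assert builder.info.features is not None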
| 90 |
import base64


def base85_encode(string: str) -> bytes:
    # encode the input string as UTF-8 bytes, then Ascii85-encode them
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    # Ascii85-decode the bytes, then decode them back to a string
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
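# Round-trip sanity check (illustrative addition, not in the original script): Ascii85
# packs every 4 input bytes into 5 printable characters, and decoding inverts encoding.
_sample = "Some text to encode."
assert base85_decode(base85_encode(_sample)) == _sample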
| 90 | 1 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)

                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
| 67 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    """JIT-compile the multi-scale deformable attention CUDA kernels and import them."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
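# Illustrative call-site sketch (not part of the original module): the JIT build above
# needs a CUDA toolchain, so callers typically attempt it lazily and fall back to a
# pure-PyTorch implementation when compilation fails.
#
# try:
#     MultiScaleDeformableAttention = load_cuda_kernels()
# except Exception:
#     MultiScaleDeformableAttention = None  # use the eager implementation instead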
| 67 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_whisper_fast'''] = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_whisper'''] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_whisper'''] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_whisper'''] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # pick a random pivot and move it to the end of the subarray
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
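# Small sanity check of the helpers above (illustrative addition, not in the original
# script): the sort happens in place and the return value counts comparisons.
_demo = [5, 1, 4, 2, 3]
_demo_comparisons = _in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert _demo == [1, 2, 3, 4, 5] and _demo_comparisons > 0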
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
| 222 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip both qubits with X gates, measure them, and return the shot counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
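# Expected behavior (illustrative note, not from the original script): both qubits are
# flipped from |0> to |1> before measurement, so all 1000 shots land in the '11' basis
# state and the printed counts look like {'11': 1000}.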
| 191 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 5_1_2,
'''xlm-roberta-large''': 5_1_2,
'''xlm-roberta-large-finetuned-conll02-dutch''': 5_1_2,
'''xlm-roberta-large-finetuned-conll02-spanish''': 5_1_2,
'''xlm-roberta-large-finetuned-conll03-english''': 5_1_2,
'''xlm-roberta-large-finetuned-conll03-german''': 5_1_2,
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ["input_ids", "attention_mask"]
def __init__( self : Dict, _UpperCAmelCase : str, _UpperCAmelCase : Optional[int]="<s>", _UpperCAmelCase : Optional[int]="</s>", _UpperCAmelCase : Dict="</s>", _UpperCAmelCase : List[Any]="<s>", _UpperCAmelCase : Union[str, Any]="<unk>", _UpperCAmelCase : List[Any]="<pad>", _UpperCAmelCase : str="<mask>", _UpperCAmelCase : Optional[Dict[str, Any]] = None, **_UpperCAmelCase : List[Any], ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : int = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE__ : List[str] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE__ : Dict = 1
SCREAMING_SNAKE_CASE__ : int = len(self.sp_model ) + self.fairseq_offset
SCREAMING_SNAKE_CASE__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int, _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A_ ( self : Any, _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A_ ( self : List[Any], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None, _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def A_ ( self : Union[str, Any], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def A_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self : List[str], _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase )
def A_ ( self : Optional[Any], _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A_ ( self : Tuple, _UpperCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A_ ( self : Any, _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip()
return out_string
def A_ ( self : Union[str, Any], _UpperCAmelCase : str, _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase, "wb" ) as fi:
SCREAMING_SNAKE_CASE__ : Any = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
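# A minimal sketch (editor's note) of the fairseq/SentencePiece id alignment the
# converters above implement, assuming an XLM-R style offset of 1: fairseq pins
# "<s>"=0, "<pad>"=1, "</s>"=2, "<unk>"=3 ahead of the SentencePiece pieces, so
# every regular piece id is shifted by `fairseq_offset` in both directions:
#
#     fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
#     fairseq_offset = 1
#     spm_id = 41                       # hypothetical SentencePiece piece id
#     token_id = spm_id + fairseq_offset
#     assert token_id - fairseq_offset == spm_id   # round trip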
| 191 | 1 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
FRAMEWORK = '''pt'''
elif is_tf_available():
FRAMEWORK = '''tf'''
else:
FRAMEWORK = '''jax'''
class PerceiverTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = PerceiverTokenizer
test_rust_tokenizer = False
def setUp( self : Tuple ):
super().setUp()
tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def perceiver_tokenizer( self : Dict ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def get_tokenizer( self : List[Any] , **kwargs : Tuple ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def get_clean_sequence( self : Any , tokenizer : PerceiverTokenizer , with_prefix_space : bool=False , max_length : int=20 , min_length : int=5 ):
# Collect (id, token) pairs whose decoded form is plain ASCII text
toks = []
for i in range(len(tokenizer ) ):
try:
tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
except UnicodeDecodeError:
pass
else:
toks.append((i, tok) )
toks = list(filter(lambda t : re.match(r'^[ a-zA-Z]+$' , t[1] ) , toks ) )
toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
if max_length is not None and len(toks ) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
while len(toks ) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
if " " not in output_txt and len(toks_ids ) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
)
if with_prefix_space:
output_txt = ' ' + output_txt
output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
return output_txt, output_ids
def test_multibytes_char( self : List[str] ):
tokenizer = self.perceiver_tokenizer
src_text = 'Unicode €.'
encoded = tokenizer(src_text )
encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , encoded_ids )
# decoding
decoded = tokenizer.decode(encoded_ids )
self.assertEqual(decoded , '[CLS]Unicode €.[SEP]' )
encoded = tokenizer('e è é ê ë' )
encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , encoded_ids )
# decoding
decoded = tokenizer.decode(encoded_ids )
self.assertEqual(decoded , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
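# Note (editor): the Perceiver tokenizer works on raw UTF-8 bytes with a fixed
# offset of 6 reserved for the special tokens (PAD=0 ... SEP=5), so each byte b
# becomes id b + 6. A quick check against the expected ids above:
#
#     assert [b + 6 for b in 'Unicode €.'.encode('utf-8')] == \
#         [91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52]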
def test_prepare_batch_integration( self : List[str] ):
tokenizer = self.perceiver_tokenizer
src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
self.assertIsInstance(batch , BatchEncoding )
if FRAMEWORK != "jax":
result = list(batch.input_ids.numpy()[0] )
else:
result = list(batch.input_ids.tolist()[0] )
self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def test_empty_target_text( self : Union[str, Any] ):
tokenizer = self.perceiver_tokenizer
src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , batch )
self.assertIn('attention_mask' , batch )
self.assertNotIn('decoder_input_ids' , batch )
self.assertNotIn('decoder_attention_mask' , batch )
def test_max_length_integration( self : str ):
tokenizer = self.perceiver_tokenizer
tgt_text = [
'Summary of the text.',
'Another summary.',
]
targets = tokenizer(
text_target=tgt_text , max_length=32 , padding='max_length' , truncation=True , return_tensors=FRAMEWORK )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def test_save_and_load_tokenizer( self : Tuple ):
# safety check: make sure the default model_max_length is not the value we test against
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = ' He is very happy, UNwant\u00E9d,running'
before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
tokenizer.save_pretrained(tmpdirname )
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
self.assertListEqual(before_tokens , after_tokens )
shutil.rmtree(tmpdirname )
tokenizers = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
tmpdirname = tempfile.mkdtemp()
sample_text = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
additional_special_tokens = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
tokenizer.save_pretrained(tmpdirname )
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
self.assertListEqual(before_tokens , after_tokens )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(tmpdirname )
def test_special_tokens_initialization_with_non_empty_additional_special_tokens( self : Dict ):
tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(tmp_dir )
with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
special_tokens_map = json.load(json_file )
with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
tokenizer_config = json.load(json_file )
added_tokens_extra_ids = [F'''<extra_id_{i}>''' for i in range(125 )]
special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
'an_additional_special_token'
]
tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(special_tokens_map , outfile )
with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(tokenizer_config , outfile )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
tmp_dir , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=True )]
tokenizer = tokenizer_class.from_pretrained(
tmp_dir , additional_special_tokens=new_added_tokens , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def test_decode_invalid_byte_id( self : Union[str, Any] ):
tokenizer = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
# tokenizer can be instantiated without any pretrained files, so no pretrained tokenizer list
def test_pretrained_model_lists( self : List[str] ):
pass
# tokenizer does not have a fixed vocabulary
def test_get_vocab( self : Optional[Any] ):
pass
# inputs cannot be pretokenized for a byte-level tokenizer
def test_pretokenized_inputs( self : Optional[Any] ):
pass
# all ids are byte values, so conversion reversibility is covered elsewhere
def test_conversion_reversible( self : List[Any] ):
pass
def test_convert_tokens_to_string_format( self : List[Any] ):
tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
string = tokenizer.convert_tokens_to_string(tokens )
self.assertIsInstance(string , str )
| 315 |
import string
def decrypt(message: str ) -> None:
'''Brute-force a Caesar cipher by printing the decryption under every possible key.'''
for key in range(len(string.ascii_uppercase ) ):
translated = ""
for symbol in message:
if symbol in string.ascii_uppercase:
num = string.ascii_uppercase.find(symbol )
num = num - key
if num < 0:
num = num + len(string.ascii_uppercase )
translated = translated + string.ascii_uppercase[num]
else:
translated = translated + symbol
print(F'Decryption using Key #{key}: {translated}' )
def main() -> None:
'''Read an encrypted message from stdin and print all candidate decryptions.'''
message = input("Encrypted message: " )
message = message.upper()
decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
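# A worked example, assuming the functions above: the ciphertext "KHOOR"
# shifted back by key 3 yields "HELLO" ('K'->'H', 'H'->'E', 'O'->'L', 'R'->'O'),
# so the key-3 line of the brute-force output reveals the plaintext:
#
#     decrypt("KHOOR")
#     # ...
#     # Decryption using Key #3: HELLO
#     # ...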
| 68 | 0 |
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int , absent: int , late: int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
state_late = _calculate(days - 1 , absent , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
state_absent = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
state_ontime = _calculate(days - 1 , absent , 0 )
prizestrings = state_late + state_absent + state_ontime
cache[key] = prizestrings
return prizestrings
def solution(days: int = 30 ) -> int:
return _calculate(days , absent=0 , late=0 )
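# A brute-force cross-check for small n, assuming the functions above; it
# enumerates all attendance strings over {O, L, A} directly and should agree
# with _calculate (e.g. both give 19 for days = 3):
#
#     from itertools import product
#     def brute_force(days: int) -> int:
#         return sum(
#             1
#             for s in product("OLA", repeat=days)
#             if s.count("A") < 2 and "LLL" not in "".join(s)
#         )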
if __name__ == "__main__":
print(solution())
| 354 |
def sylvester(number: int ) -> int:
assert isinstance(number , int ), f'The input value of [n={number}] is not an integer'
if number == 1:
return 2
elif number < 1:
msg = f'The input value of [n={number}] has to be > 0'
raise ValueError(msg )
else:
num = sylvester(number - 1 )
lower = num - 1
upper = num
return lower * upper + 1
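# The recurrence a(n) = a(n-1)^2 - a(n-1) + 1 (equivalently lower * upper + 1
# above) gives the sequence 2, 3, 7, 43, 1807, ...; a quick check, assuming the
# function above:
#
#     assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]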
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""") | 196 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class TatoebaConversionTester( unittest.TestCase ):
'''simple docstring'''
@cached_property
def resolver( self) -> TatoebaConverter:
tmp_dir = tempfile.mkdtemp()
return TatoebaConverter(save_dir=tmp_dir)
@slow
def test_resolver( self) -> None:
self.resolver.convert_models(['''heb-eng'''])
@slow
def test_model_card( self) -> None:
content, mmeta = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=True)
assert mmeta["long_pair"] == "heb-eng"
| 43 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]] ) -> list[list[int]]:
'''Compute the next Game of Life generation from the current grid.'''
next_generation = []
for i in range(len(cells ) ):
next_generation_row = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
neighbour_count = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(cells ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(cells ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
alive = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or (not alive and neighbour_count == 3)
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(next_generation_row )
return next_generation
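# A small sanity check, assuming the function above: the blinker oscillates
# with period 2, so two steps return it to its starting configuration:
#
#     once = new_generation(BLINKER)    # [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
#     twice = new_generation(once)
#     assert twice == BLINKER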
def generate_images(cells: list[list[int]] , frames: int ) -> list[Image.Image]:
'''Render `frames` successive generations as a list of PIL images.'''
images = []
for _ in range(frames ):
# Create output image
img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
pixels = img.load()
# Save cells to image
for x in range(len(cells ) ):
for y in range(len(cells[0] ) ):
colour = 255 - cells[y][x] * 255
pixels[x, y] = (colour, colour, colour)
# Save image
images.append(img )
cells = new_generation(cells )
return images
if __name__ == "__main__":
images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 43 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig( PretrainedConfig ):
"""Configuration for Longformer models; RoBERTa-style defaults plus `attention_window`."""
model_type = 'longformer'
def __init__( self , attention_window : Union[List[int], int] = 512 , sep_token_id : int = 2 , pad_token_id : int = 1 , bos_token_id : int = 0 , eos_token_id : int = 2 , vocab_size : int = 30522 , hidden_size : int = 768 , num_hidden_layers : int = 12 , num_attention_heads : int = 12 , intermediate_size : int = 3072 , hidden_act : str = "gelu" , hidden_dropout_prob : float = 0.1 , attention_probs_dropout_prob : float = 0.1 , max_position_embeddings : int = 512 , type_vocab_size : int = 2 , initializer_range : float = 0.02 , layer_norm_eps : float = 1e-12 , onnx_export : bool = False , **kwargs , ) -> None:
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.attention_window = attention_window
self.sep_token_id = sep_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
def __init__( self , config : "PretrainedConfig" , task : str = "default" , patching_specs : "List[PatchingSpec]" = None ) -> None:
super().__init__(config , task , patching_specs )
config.onnx_export = True
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
def outputs( self ) -> Mapping[str, Mapping[int, str]]:
outputs = super().outputs
if self.task == "default":
outputs["pooler_output"] = {0: '''batch'''}
return outputs
@property
def atol_for_validation( self ) -> float:
return 1e-4
@property
def default_onnx_opset( self ) -> int:
# needs to be >= 14 to support the tril operator used by Longformer
return max(super().default_onnx_opset , 14 )
def generate_dummy_inputs( self , preprocessor : "PreTrainedTokenizerBase" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) -> Mapping[str, Any]:
inputs = super().generate_dummy_inputs(
preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
inputs["global_attention_mask"] = torch.zeros_like(inputs['''input_ids'''] )
# make every second token global
inputs["global_attention_mask"][:, ::2] = 1
return inputs
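# A minimal usage sketch (editor's note), assuming the classes above: the ONNX
# config adds a `global_attention_mask` input on top of the usual
# `input_ids`/`attention_mask`, which is the extra input Longformer needs at
# export time:
#
#     config = LongformerConfig()
#     onnx_config = LongformerOnnxConfig(config)
#     print(list(onnx_config.inputs))
#     # ['input_ids', 'attention_mask', 'global_attention_mask']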
| 287 |
'''simple docstring'''
def greatest_common_divisor(x : int, y : int ) -> int:
"""Euclid's algorithm for the greatest common divisor."""
return x if y == 0 else greatest_common_divisor(y, x % y )
def lcm(x : int, y : int ) -> int:
"""Least common multiple via the gcd."""
return (x * y) // greatest_common_divisor(x, y )
def solution(n : int = 20 ) -> int:
"""Smallest positive number evenly divisible by all of 1..n (Project Euler 5)."""
g = 1
for i in range(1, n + 1 ):
g = lcm(g, i )
return g
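# Worked micro-example, assuming the functions above: the lcm of 1..10 is 2520
# (the classic Project Euler 5 warm-up); each step only grows g when a new
# prime power appears among 1..n:
#
#     assert solution(10) == 2520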
if __name__ == "__main__":
print(F"""{solution() = }""")
| 287 | 1 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class BulletMenu:
def __init__( self , prompt : str = None , choices : list = [] ):
"""An interactive bullet-point selection menu for the terminal."""
self.position = 0
self.choices = choices
self.prompt = prompt
if sys.platform == "win32":
self.arrow_char = '''*'''
else:
self.arrow_char = '''➔ '''
def write_choice( self , index : int , end : str = "" ):
"""Write the choice at `index`, colored green where the terminal supports it."""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , end )
else:
forceWrite(self.choices[index] , end )
def print_choice( self , index : int ):
"""Print a choice, prefixing the arrow marker if it is the current position."""
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(index )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def move_direction( self , direction : Direction , num_spaces : int = 1 ):
"""Move the highlighted position up or down, redrawing the two affected rows."""
old_position = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(old_position )
move_cursor(num_spaces , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def move_up( self ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def move_down( self ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def select( self ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def interrupt( self ):
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def select_row( self ):
"""Jump directly to a row when a digit key is pressed."""
index = int(chr(self.current_selection ) )
movement = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , movement )
else:
return
else:
return
def run( self , default_choice : int = 0 ):
"""Draw the menu, loop on input, and return the index of the selected choice."""
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
self.position = default_choice
for i in range(len(self.choices ) ):
self.print_choice(i )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
choice = int(builtins.input() )
except ValueError:
choice = default_choice
else:
choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(choice , '''\n''' )
return choice
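# A minimal usage sketch (editor's note), assuming the class above and a real
# terminal; `BulletMenu` is the restored name for the obfuscated class, so
# treat it as an assumption:
#
#     menu = BulletMenu("Pick a backend:", ["cpu", "cuda", "mps"])
#     selected = menu.run(default_choice=0)   # returns the chosen index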
| 67 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.pad_token_id = self.vocab_size - 1
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
model = OpenAIGPTModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
result = model(input_ids , token_type_ids=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
model = OpenAIGPTLMHeadModel(config )
model.to(torch_device )
model.eval()
result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
model = OpenAIGPTDoubleHeadsModel(config )
model.to(torch_device )
model.eval()
result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
config.num_labels = self.num_labels
model = OpenAIGPTForSequenceClassification(config )
model.to(torch_device )
model.eval()
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
pipeline_model_mapping = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
inputs_dict["input_ids"] = inputs_dict['''labels''']
inputs_dict["token_type_ids"] = inputs_dict['''labels''']
inputs_dict["mc_token_ids"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
inputs_dict["mc_labels"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ):
self.model_tester = OpenAIGPTModelTester(self )
self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
def test_config( self ):
self.config_tester.run_common_tests()
def test_openai_gpt_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
def test_openai_gpt_lm_head_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
def test_openai_gpt_double_lm_head_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
def test_openai_gpt_classification_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = OpenAIGPTModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
class OPENAIGPTModelLanguageGenerationTest( unittest.TestCase ):
@slow
def test_lm_generate_openai_gpt( self ):
model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(torch_device )
input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device ) # the president is
expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i'm sure he is, " said the
output_ids = model.generate(input_ids , do_sample=False )
self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
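# Note (editor): greedy decoding (do_sample=False) is deterministic, which is
# why this integration test can assert an exact continuation token sequence
# for the prompt "the president is".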
| 67 | 1 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]] , row: int , column: int ) -> bool:
'''Return True if a queen can be placed at (row, column) without being attacked.'''
for i in range(len(board ) ):
if board[row][i] == 1:
return False
for i in range(len(board ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
if board[i][j] == 1:
return False
return True
def solve(board: list[list[int]] , row: int ) -> bool:
'''Place queens row by row with backtracking, recording every complete placement.'''
if row >= len(board ):
solution.append(board )
printboard(board )
print()
return True
for i in range(len(board ) ):
if is_safe(board , row , i ):
board[row][i] = 1
solve(board , row + 1 )
board[row][i] = 0
return False
def printboard(board: list[list[int]] ) -> None:
'''Print the board with Q for queens and . for empty squares.'''
for i in range(len(board ) ):
for j in range(len(board ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
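# Note (editor): for n = 8 the backtracking search above records one entry per
# complete placement, so len(solution) ends up as the classic 92. (The list
# stores references to the live board, so only its length is meaningful once
# the search finishes.)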
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 39 |
def remove_duplicates(key: str ) -> str:
'''Remove duplicate alphabetic characters from the key, preserving order.'''
key_no_dups = ''''''
for ch in key:
if ch == " " or (ch not in key_no_dups and ch.isalpha()):
key_no_dups += ch
return key_no_dups
def create_cipher_map(key: str ) -> dict[str, str]:
'''Build a substitution map: the deduplicated key first, then the rest of the alphabet.'''
alphabet = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
key = remove_duplicates(key.upper() )
offset = len(key )
# First fill cipher with key characters
cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(key ) , 26 ):
char = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
char = alphabet[i - offset]
cipher_alphabet[alphabet[i]] = char
return cipher_alphabet
def encipher(message: str , cipher_map: dict[str, str] ) -> str:
'''Encipher a message with the given substitution map; non-letters pass through.'''
return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher(message: str , cipher_map: dict[str, str] ) -> str:
'''Decipher a message by inverting the substitution map.'''
rev_cipher_map = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main() -> None:
'''Interactively encipher or decipher a message with a keyword cipher.'''
message = input('''Enter message to encode or decode: ''' ).strip()
key = input('''Enter keyword: ''' ).strip()
option = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
func = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
cipher_map = create_cipher_map(key )
print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 39 | 1 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase_ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase_ = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
lowerCamelCase_ = spec.loader.load_module()
lowerCamelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase_ = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints() -> None:
configs_without_checkpoint = []
for config_class in list(CONFIG_MAPPING.values() ):
checkpoint_found = False
# source code of `config_class`
config_source = inspect.getsource(config_class )
checkpoints = _re_checkpoint.findall(config_source )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
ckpt_name, ckpt_link = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
checkpoint_found = True
break
name = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(name )
if len(configs_without_checkpoint ) > 0:
message = '''\n'''.join(sorted(configs_without_checkpoint ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 191 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = ConsistencyModelPipeline
params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
required_optional_params = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def dummy_uncond_unet( self ):
unet = UNet2DModel.from_pretrained(
'''diffusers/consistency-models-test''' ,subfolder='''test_unet''' ,)
return unet
@property
def dummy_cond_unet( self ):
unet = UNet2DModel.from_pretrained(
'''diffusers/consistency-models-test''' ,subfolder='''test_unet_class_cond''' ,)
return unet
def get_dummy_components( self ,class_cond=False ):
if class_cond:
unet = self.dummy_cond_unet
else:
unet = self.dummy_uncond_unet
# Default to CM multistep sampler
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
components = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def get_dummy_inputs( self ,device ,seed=0 ):
if str(device ).startswith('''mps''' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
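# Note (editor): `num_inference_steps=None` with explicit `timesteps=[22, 0]`
# exercises the multistep consistency sampler; passing `num_inference_steps=1`
# with `timesteps=None` instead (as the one-step tests below do) exercises
# single-step generation from the same components.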
def test_consistency_model_pipeline_multistep( self ):
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = ConsistencyModelPipeline(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
image = pipe(**inputs ).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_consistency_model_pipeline_multistep_class_cond( self ):
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(class_cond=True )
pipe = ConsistencyModelPipeline(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
inputs['''class_labels'''] = 0
image = pipe(**inputs ).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_consistency_model_pipeline_onestep( self ):
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
pipe = ConsistencyModelPipeline(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
inputs['''num_inference_steps'''] = 1
inputs['''timesteps'''] = None
image = pipe(**inputs ).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def test_consistency_model_pipeline_onestep_class_cond( self ):
device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(class_cond=True )
pipe = ConsistencyModelPipeline(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs(device )
inputs['''num_inference_steps'''] = 1
inputs['''timesteps'''] = None
inputs['''class_labels'''] = 0
image = pipe(**inputs ).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests( unittest.TestCase ):
def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
# NOTE (editor): the original dtype default was ambiguous in the source;
# float16 is assumed here, matching the fp16 flash-attention tests below.
def get_inputs( self ,seed=0 ,get_fixed_latents=False ,device="cpu" ,dtype=torch.float16 ,shape=(1, 3, 64, 64) ):
generator = torch.manual_seed(seed )
inputs = {
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
latents = self.get_fixed_latents(seed=seed ,device=device ,dtype=dtype ,shape=shape )
inputs['''latents'''] = latents
return inputs
def get_fixed_latents( self ,seed=0 ,device="cpu" ,dtype=torch.float16 ,shape=(1, 3, 64, 64) ):
if type(device ) == str:
device = torch.device(device )
generator = torch.Generator(device=device ).manual_seed(seed )
latents = randn_tensor(shape ,generator=generator ,device=device ,dtype=dtype )
return latents
def test_consistency_model_cd_multistep( self ):
unet = UNet2DModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
pipe = ConsistencyModelPipeline(unet=unet ,scheduler=scheduler )
pipe.to(torch_device=torch_device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs()
image = pipe(**inputs ).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def test_consistency_model_cd_onestep( self ):
unet = UNet2DModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
pipe = ConsistencyModelPipeline(unet=unet ,scheduler=scheduler )
pipe.to(torch_device=torch_device )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs()
inputs['''num_inference_steps'''] = 1
inputs['''timesteps'''] = None
image = pipe(**inputs ).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_2
def test_consistency_model_cd_multistep_flash_attn( self ):
unet = UNet2DModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
pipe = ConsistencyModelPipeline(unet=unet ,scheduler=scheduler )
pipe.to(torch_device=torch_device ,torch_dtype=torch.float16 )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(get_fixed_latents=True ,device=torch_device )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=True ,enable_math=False ,enable_mem_efficient=False ):
image = pipe(**inputs ).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_2
def test_consistency_model_cd_onestep_flash_attn( self ):
unet = UNet2DModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
scheduler = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
pipe = ConsistencyModelPipeline(unet=unet ,scheduler=scheduler )
pipe.to(torch_device=torch_device ,torch_dtype=torch.float16 )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(get_fixed_latents=True ,device=torch_device )
inputs['''num_inference_steps'''] = 1
inputs['''timesteps'''] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=True ,enable_math=False ,enable_mem_efficient=False ):
image = pipe(**inputs ).images
assert image.shape == (1, 64, 64, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 191 | 1 |
"""simple docstring"""
def binary_exponentiation(a: int , n: int , mod: int ) -> int:
'''Compute (a ** n) % mod using O(log n) multiplications.'''
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
else:
b = binary_exponentiation(a , n // 2 , mod )
return (b * b) % mod
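# Worked example, assuming the function above: for a prime p and b not
# divisible by p, Fermat's little theorem gives b ** (p - 2) % p as the
# modular inverse of b, so "dividing" by b mod p is multiplying by it:
#
#     inv = binary_exponentiation(10, 701 - 2, 701)
#     assert (inv * 10) % 701 == 1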
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
# (integer division is valid here because b evenly divides a)
print((a // b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a // b) % p == (a * b ** (p - 2)) % p)
| 354 | """simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file: Mock , sock: Mock ) -> None:
'''simple docstring'''
# ===== initialization =====
conn = Mock()
sock.return_value.accept.return_value = conn, Mock()
f = iter([1, None] )
file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 32 | 0 |
from ..utils import DummyObject, requires_backends
# NOTE (editor): the original class and function names in this dummy-objects
# module were lost in processing; the numbered `_TorchDummy*` and
# `_torch_dummy_fn_*` names below are placeholders, not the library's real API.
class _TorchDummy1(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy2(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy3(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy4(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy5(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy6(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy7(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy8(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy9(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy10(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy11(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
def _torch_dummy_fn_1(*args , **kwargs ):
'''simple docstring'''
requires_backends(_torch_dummy_fn_1 , ['''torch'''] )
def _torch_dummy_fn_2(*args , **kwargs ):
'''simple docstring'''
requires_backends(_torch_dummy_fn_2 , ['''torch'''] )
def _torch_dummy_fn_3(*args , **kwargs ):
'''simple docstring'''
requires_backends(_torch_dummy_fn_3 , ['''torch'''] )
def _torch_dummy_fn_4(*args , **kwargs ):
'''simple docstring'''
requires_backends(_torch_dummy_fn_4 , ['''torch'''] )
def _torch_dummy_fn_5(*args , **kwargs ):
'''simple docstring'''
requires_backends(_torch_dummy_fn_5 , ['''torch'''] )
def _torch_dummy_fn_6(*args , **kwargs ):
'''simple docstring'''
requires_backends(_torch_dummy_fn_6 , ['''torch'''] )
def _torch_dummy_fn_7(*args , **kwargs ):
'''simple docstring'''
requires_backends(_torch_dummy_fn_7 , ['''torch'''] )
class _TorchDummy12(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy13(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy14(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy15(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy16(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def from_pretrained( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
class _TorchDummy17(metaclass=DummyObject ):
_backends = ['''torch''']
def __init__( self ,*args ,**kwargs):
requires_backends(self ,['''torch'''])
@classmethod
def from_config( cls ,*args ,**kwargs):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Optional[Any] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : int =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : List[str] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Any =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : str =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Dict =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Optional[Any] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Union[str, Any] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : str =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Any =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Tuple =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Union[str, Any] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : List[Any] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : int =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Optional[int] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : int =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Dict =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Optional[int] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Optional[int] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Tuple =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Optional[Any] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : str =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : List[Any] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : List[Any] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Any =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : List[str] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : int =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : str =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Any =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : List[str] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Tuple =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : List[Any] =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
class lowercase ( metaclass=__UpperCamelCase ):
lowercase_ : Dict =['torch']
def __init__( self ,*A__ ,**A__):
requires_backends(self ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
@classmethod
def A__ ( cls ,*A__ ,**A__):
requires_backends(cls ,['''torch'''])
| 101 |
import unittest
from knapsack import knapsack as k
class TestClass(unittest.TestCase):
    def test_base_case(self):
        """Zero capacity yields zero value, regardless of the items."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance: taking the weight-1 and weight-2 items gives 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic 60/100/120 example: the optimum picks the 100 and 120 items."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
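

# For reference, a minimal sketch of the 0/1 knapsack recursion these tests
# exercise. The argument order (capacity, weights, values, count) is inferred
# from the calls above; the imported `knapsack` module is the authoritative
# implementation.
def _knapsack_sketch(capacity: int, weights: list[int], values: list[int], counter: int) -> int:
    if counter == 0 or capacity == 0:
        return 0  # no items left or no room left
    if weights[counter - 1] > capacity:
        return _knapsack_sketch(capacity, weights, values, counter - 1)
    # best of: taking the last item vs. leaving it
    return max(
        values[counter - 1]
        + _knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),
        _knapsack_sketch(capacity, weights, values, counter - 1),
    )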
if __name__ == "__main__":
unittest.main()
| 196 | 0 |
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    _backends = ['torch', 'scipy']
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def SCREAMING_SNAKE_CASE_( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def SCREAMING_SNAKE_CASE_( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["torch", "scipy"] )
| 47 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 47 | 1 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` hits in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""")
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("""the function is defined for non-negative integers""")
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
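    # Worked check: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375,
    # which is the value the call above prints.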
| 287 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_lowerCamelCase ="""pytorch_model.bin"""
_lowerCamelCase ="""pytorch_model.bin.index.json"""
_lowerCamelCase ="""adapter_config.json"""
_lowerCamelCase ="""adapter_model.bin"""
_lowerCamelCase ="""adapter_model.safetensors"""
_lowerCamelCase ="""tf_model.h5"""
_lowerCamelCase ="""tf_model.h5.index.json"""
_lowerCamelCase ="""model.ckpt"""
_lowerCamelCase ="""flax_model.msgpack"""
_lowerCamelCase ="""flax_model.msgpack.index.json"""
_lowerCamelCase ="""model.safetensors"""
_lowerCamelCase ="""model.safetensors.index.json"""
_lowerCamelCase ="""config.json"""
_lowerCamelCase ="""preprocessor_config.json"""
_lowerCamelCase =FEATURE_EXTRACTOR_NAME
_lowerCamelCase ="""generation_config.json"""
_lowerCamelCase ="""modelcard.json"""
_lowerCamelCase ="""▁"""
_lowerCamelCase =SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_lowerCamelCase =[
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_lowerCamelCase =[[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_lowerCamelCase =[[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers." )
| 287 | 1 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str):
    """Rewrite the version in `fname` using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version: str):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version: str, patch: bool = False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace `main` documentation links in the README model list with stable ones."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Compute the release version, confirm it with the user, and apply it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Bump to the next dev version after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
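
# Typical invocations from the repository root (flags as defined above):
#
#     python utils/release.py                 # prepare a minor release
#     python utils/release.py --patch         # prepare a patch release
#     python utils/release.py --post_release  # move back to a dev version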
| 206 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
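
# Usage sketch: the default config reproduces the base architecture, and the
# derived property above is the product of the conv strides (5 * 2**6 = 320),
# i.e. how many input samples map to one logit frame.
#
#     config = UniSpeechSatConfig()
#     assert config.inputs_to_logits_ratio == 320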
| 206 | 1 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 39 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __A ( )-> Tuple:
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class ModelForTest(nn.Module):
    """Tiny model used to verify GPU memory release."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function('hello')
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, 'hello'])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, 'hello', 'world')
        self.assertIn('Batch size was passed into `f`', cm.exception.args[0])
        self.assertIn('`f(arg1=\'hello\', arg2=\'world\')', cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('Oops, we had an error!')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('Oops, we had an error!', cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
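

# For context: the decorator retries with a halved batch size whenever the
# wrapped function raises a CUDA-OOM style error, which is why the recorded
# sizes above are [128, 64, 32, 16, 8]. A simplified sketch of the idea (the
# real implementation in accelerate.utils.memory also clears caches and
# produces the richer error messages asserted above):
def find_executable_batch_size_sketch(starting_batch_size=128):
    def decorator(function):
        def inner(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as e:
                    if "out of memory" in str(e):
                        batch_size //= 2  # shrink and retry
                    else:
                        raise
            raise RuntimeError("No executable batch size found, reached zero.")

        return inner

    return decorator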
| 39 | 1 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin below 3.3e24; probabilistic above if allowed."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            '''Warning: upper bound of deterministic test is exceeded. '''
            '''Pass allow_probable=True to allow probabilistic test. '''
            '''A return value of True indicates a probable prime.''' )
    # array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False

    return True
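

# Example of the decomposition above: for n = 561, n - 1 = 560 = 2**4 * 35,
# so s = 4 and d = 35; since 561 < 2047 the single witness 2 suffices and
# correctly exposes this Carmichael number as composite.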
def test_miller_rabin() -> None:
    """Exercise miller_rabin on composite/prime pairs straddling each bound."""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 51 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=''' ''')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=''' ''')
            else:
                print(triangle[row_idx][col_idx], end='''''')
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''')
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle computing only half of each row by symmetry."""
    if not isinstance(num_rows, int):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''')
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Time both triangle generators over a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup='''import __main__''')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
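

# Note on the optimized variant: each row of length k + 1 is symmetric, so only
# ceil((k + 1) / 2) sums are computed and then mirrored, roughly halving the
# additions compared to the row-by-row construction.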
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 51 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    '''Construct a Reformer tokenizer based on SentencePiece.'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
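
# Usage sketch (fetches the checkpoint listed in the pretrained map above, so
# network access is required):
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     tokens = tokenizer.tokenize("Crime and Punishment")
#     ids = tokenizer.convert_tokens_to_ids(tokens)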
| 48 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
a_ : str = OpenAIGPTModelTester(self )
a_ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=3_7 )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
    def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 32 | 0 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]] ) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.' ) | 351 |
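
# A minimal usage sketch of inverse_of_matrix() above (illustrative, doctest-style):
#
#   >>> inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]])
#   [[0.6, -0.7], [-0.2, 0.4]]
#
# Check: the determinant is 4*6 - 2*7 = 10, and the swapped/negated matrix
# [[6, -7], [-2, 4]] divided by 10 gives exactly the result shown.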
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726" ) -> dict:
    new_olid = olid.strip().strip('/' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/' ) != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg )
    return requests.get(f"https://openlibrary.org/{new_olid}.json" ).json()
def summarize_book(ol_book_data: dict ) -> dict:
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ', '.join(value )
    return data
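
# Illustrative flow for a known ISBN. The exact Open Library payload (and network
# access) are assumptions, so this is a sketch rather than a guaranteed result:
#
#   >>> book = summarize_book(get_openlibrary_data('isbn/0140328726'))  # doctest: +SKIP
#   >>> book['Title']  # doctest: +SKIP
#   'Matilda'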
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''') | 210 | 0 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class A__ ( ProcessorMixin ):
    attributes = ['image_processor']
    image_processor_class = 'SamImageProcessor'
    def __init__( self , image_processor ):
        '''simple docstring'''
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['longest_edge']
    def __call__( self , images=None , input_points=None , input_labels=None , input_boxes=None , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , **kwargs , )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor['original_sizes']
        if hasattr(original_sizes , 'numpy' ):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points , input_labels , input_boxes = self._check_and_preprocess_points(
            input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
        return encoding_image_processor
    def _normalize_and_convert( self , encoding_image_processor , original_sizes , input_points=None , input_labels=None , input_boxes=None , return_tensors="pt" ):
        '''simple docstring'''
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size , point , original_size )
                    for point, original_size in zip(input_points , original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points , input_labels = self._pad_points_and_labels(input_points , input_labels )
            input_points = np.array(input_points )
        if input_labels is not None:
            input_labels = np.array(input_labels )
        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_sizes[0] , is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size , box , original_size , is_bounding_box=True )
                    for box, original_size in zip(input_boxes , original_sizes )
                ]
            input_boxes = np.array(input_boxes )
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({'input_boxes': input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({'input_points': input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({'input_labels': input_labels} )
        return encoding_image_processor
    def _pad_points_and_labels( self , input_points , input_labels ):
        '''simple docstring'''
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates( self , target_size , coords: np.ndarray , original_size , is_bounding_box=False ) -> np.ndarray:
        '''simple docstring'''
        old_h , old_w = original_size
        new_h , new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1 , 2 , 2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1 , 4 )
        return coords
    def _check_and_preprocess_points( self , input_points=None , input_labels=None , input_boxes=None , ):
        '''simple docstring'''
        if input_points is not None:
            if hasattr(input_points , 'numpy' ):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
                raise ValueError('Input points must be a list of list of floating points.' )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels , 'numpy' ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
                raise ValueError('Input labels must be a list of list integers.' )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes , 'numpy' ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes , list )
                or not isinstance(input_boxes[0] , list )
                or not isinstance(input_boxes[0][0] , list )
            ):
                raise ValueError('Input boxes must be a list of list of list of floating points.' )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
@property
def A ( self : List[Any] ) -> str:
'''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def A ( self , *args , **kwargs ):
        '''simple docstring'''
        return self.image_processor.post_process_masks(*args , **kwargs )
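
# A minimal usage sketch for the processor above. The checkpoint id and the exported
# `SamProcessor` name are assumptions borrowed from the transformers SAM integration,
# not guarantees made by this file:
#
#   from transformers import SamProcessor
#   from PIL import Image
#   import numpy as np
#
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")  # hypothetical checkpoint
#   image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
#   inputs = processor(image, input_points=[[[320, 240]]], return_tensors="pt")
#   # Prompt points are rescaled to the longest-edge target size, and ragged point
#   # lists are padded with point_pad_value (-10) alongside their labels.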
| 47 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
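
# Example invocation (illustrative; the script and flag values are hypothetical):
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5
#
# Everything after the positional script path is forwarded to the training script;
# `--tpu_num_cores` is appended to its argv before `xmp.spawn` launches `_mp_fn`.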
| 47 | 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCamelCase__ = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
UpperCamelCase__ = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
UpperCamelCase__ = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n"
def simple_accuracy(preds , labels ):
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs , in_sentvecs ):
    """simple docstring"""
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , """cosine""" )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
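# A tiny synthetic check of the retrieval metric above (the vectors are made up for
# illustration). Note that with fewer than 10 candidates the top-10 slice contains
# every index, so precision@10 is trivially 1.0:
#
#   >>> vecs = [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
#   >>> precision_at_10(vecs, vecs)  # doctest: +SKIP
#   1.0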
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
    def _info( self ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute( self , predictions , references ):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions , references )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in [
            "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca",
            "iitp-mr", "iitp-pr", "actsa-sc", "md",
        ]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
                """\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
                """\"wiki-ner\"]""" )
| 353 |
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float , from_type: str , to_type: str ):
    """simple docstring"""
    from_sanitized = from_type.lower().strip("""s""" )
    to_sanitized = to_type.lower().strip("""s""" )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'from_type' value: {from_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'to_type' value: {to_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}"""
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
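
# Illustrative conversions (the values follow directly from the exponent table above):
#
#   >>> length_conversion(4, "meter", "kilometer")  # doctest: +SKIP
#   0.004
#   >>> length_conversion(1, "gigametre", "megametre")  # doctest: +SKIP
#   1000.0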
if __name__ == "__main__":
from doctest import testmod
testmod()
| 87 | 0 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase :str = logging.get_logger(__name__)
class _lowerCAmelCase ( Trainer ):
    def __init__(self , args=None , **kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
        super().__init__(args=args , **kwargs ) | 206 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('''T''')
class GraphAdjacencyList ( Generic[T] ):
    def __init__(self , directed: bool = True ):
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self , source_vertex: T , destination_vertex: T ) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
    def __repr__(self ):
        return pformat(self.adj_list ) | 206 | 1 |
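
# A minimal usage sketch of the GraphAdjacencyList class above:
#
#   >>> graph = GraphAdjacencyList(directed=False)
#   >>> graph.add_edge(1, 2).add_edge(2, 3)  # add_edge returns self, so calls chain
#   {1: [2], 2: [1, 3], 3: [2]}
#   >>> graph.adj_list[2]
#   [1, 3]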
'''simple docstring'''
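# A self-reproducing program (quine): %r re-embeds the template string into itself
# with quotes and escapes intact, so the printed text matches the print statement
# below exactly.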
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 360 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _lowercase ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """的""",
            """价""",
            """格""",
            """是""",
            """15""",
            """便""",
            """alex""",
            """##andra""",
            """,""",
            """。""",
            """-""",
            """t""",
            """shirt""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
            """do_resize""": True,
            """size""": {"""height""": 224, """width""": 224},
            """do_center_crop""": True,
            """crop_size""": {"""height""": 18, """width""": 18},
            """do_normalize""": True,
            """image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            """image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
            """do_convert_rgb""": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def lowerCamelCase_ ( self: List[str] ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
    def lowerCamelCase_ ( self: Optional[int] ):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
    def lowerCamelCase_ ( self: Tuple ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def lowerCamelCase_ ( self: Tuple ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def lowerCamelCase_ ( self: Optional[Any] ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def lowerCamelCase_ ( self: Dict ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def lowerCamelCase_ ( self: Optional[Any] ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 129 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case_ : Tuple = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height , width , scale_factor=8 ):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
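
# Worked example (illustrative): with the default scale_factor of 8 the effective
# cell size is scale_factor**2 == 64, and any remainder rounds the grid up:
#
#   >>> downscale_height_and_width(768, 768)
#   (96, 96)
#   >>> downscale_height_and_width(500, 500)  # 500 = 7*64 + 52, so round up to 8 cells
#   (64, 64)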
class __snake_case ( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device(F"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        device = torch.device(F"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        """simple docstring"""
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
    @replace_example_docstring(snake_case_ )
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 51 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ):
    """simple docstring"""
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , '''rb''' ) as fp:
            corpus = pickle.load(fp , encoding='''latin1''' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
        with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
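# Example invocations (the script filename and all paths are placeholders):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json
#
# Passing --transfo_xl_dataset_file instead converts a pickled corpus into the
# PyTorch vocabulary and dataset caches.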
| 51 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __a ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_zero=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """inverse_scheduler""": inverse_scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        mask = floats_tensor((1, 16, 16) , rng=random.Random(seed ) ).to(device )
        latents = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """a dog and a newt""",
            """mask_image""": mask,
            """image_latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 2,
            """inpaint_strength""": 1.0,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def get_dummy_mask_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": image,
            """source_prompt""": """a cat and a frog""",
            """target_prompt""": """a dog and a newt""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """num_maps_per_mask""": 2,
            """mask_encode_strength""": 1.0,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def get_dummy_inversion_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": image,
            """prompt""": """a cat and a frog""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """inpaint_strength""": 1.0,
            """guidance_scale""": 6.0,
            """decode_latents""": True,
            """output_type""": """numpy""",
        }
        return inputs
    def A ( self : Any ):
        if not hasattr(self.pipeline_class , """_optional_components""" ):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(output - output_loaded ).max()
        self.assertLess(max_diff , 1e-4 )
    def A ( self : Any ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_mask_inputs(device )
        mask = pipe.generate_mask(**inputs )
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape , (1, 16, 16) )
        expected_slice = np.array([0] * 9 )
        max_diff = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
        self.assertEqual(mask[0, -3, -4] , 0 )
    def A ( self : int ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
def A ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
    def A ( self : Tuple ):
        device = """cpu"""
        components = self.get_dummy_components()
        scheduler_args = {"""beta_start""": 0.0_0085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
        components["""scheduler"""] = DPMSolverMultistepScheduler(**scheduler_args )
        components["""inverse_scheduler"""] = DPMSolverMultistepInverseScheduler(**scheduler_args )
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape , (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
@require_torch_gpu
@slow
class __a ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
    def setUpClass( cls ):
        raw_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
        raw_image = raw_image.convert("""RGB""" ).resize((768, 768) )
        cls.raw_image = raw_image
    def A ( self : int ):
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = """a bowl of fruit"""
        target_prompt = """a bowl of pears"""
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
        expected_image = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = torch.manual_seed(0 )
lowerCAmelCase_ : List[Any] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa )
lowerCAmelCase_ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase_ : int = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowerCAmelCase_ : int = """a bowl of fruit"""
lowerCAmelCase_ : Dict = """a bowl of pears"""
lowerCAmelCase_ : Dict = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCAmelCase , target_prompt=UpperCAmelCase , generator=UpperCAmelCase , )
lowerCAmelCase_ : List[Any] = pipe.invert(
prompt=UpperCAmelCase , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCAmelCase , num_inference_steps=25 , ).latents
lowerCAmelCase_ : Optional[Any] = pipe(
prompt=UpperCAmelCase , mask_image=UpperCAmelCase , image_latents=UpperCAmelCase , generator=UpperCAmelCase , negative_prompt=UpperCAmelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowerCAmelCase_ : Optional[int] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 28 |
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the truncated Maclaurin series
    sin(x) = sum_{r=0}^{accuracy-1} (-1)**r * x**(2r+1) / (2r+1)!
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # reduce theta into [-2*pi, 2*pi] so the truncated series stays accurate
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the truncated Maclaurin series
    cos(x) = sum_{r=0}^{accuracy-1} (-1)**r * x**(2r) / (2r)!
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 28 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
'''simple docstring'''
lowerCAmelCase_ :str = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" )
if "norm" in key:
lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" )
if "layer_norm1" in key:
lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" )
if "attn.q" in key:
lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" )
if "bot_conv" in key:
lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
lowerCAmelCase_ :Any = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" )
lowerCAmelCase_ :List[Any] = value
return new_state_dict
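# Minimal usage sketch (hypothetical checkpoint path; mirrors how convert_glpn_checkpoint
# below applies this function before loading weights into the HF model):
#
#   state_dict = torch.load("glpn_checkpoint.pth", map_location="cpu")
#   state_dict = rename_keys(state_dict)
#   # e.g. "module.encoder.block1.0.attn.q.weight"
#   #   -> "glpn.encoder.block.0.0.attention.self.query.weight"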
def read_in_k_v(state_dict, config):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ :Optional[Any] = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ :List[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :]
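# Note on read_in_k_v: each fused "kv" projection stacks key then value along dim 0,
# so a (2 * hidden_size, hidden_size) weight splits into two (hidden_size, hidden_size)
# matrices (rows [:hidden_size] -> key, rows [hidden_size:] -> value); biases split likewise.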
def prepare_img():
'''simple docstring'''
lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
'''simple docstring'''
lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor()
# prepare image
lowerCAmelCase_ :List[Any] = prepare_img()
lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) )
# rename keys
lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ )
# key and value matrices need special treatment
read_in_k_v(lowercase__ , lowercase__ )
# create HuggingFace model and load state dict
lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
# forward pass
lowerCAmelCase_ :Dict = model(lowercase__ )
lowerCAmelCase_ :Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase_ :Optional[Any] = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
lowerCAmelCase_ :Any = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 84 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = """pt"""
elif is_tf_available():
    FRAMEWORK = """tf"""
else:
    FRAMEWORK = """jax"""
class _UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def ta_base_tokenizer( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('''google/byt5-small''' )
    def get_tokenizer( self , **kwargs ) -> ByTaTokenizer:
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
'''simple docstring'''
__lowercase = []
for i in range(len(lowerCAmelCase__ ) ):
try:
__lowercase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__lowercase = list(filter(lambda lowerCAmelCase__ : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowerCAmelCase__ ) )
__lowercase = list(filter(lambda lowerCAmelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCAmelCase__ ) , lowerCAmelCase__ ) )
if max_length is not None and len(lowerCAmelCase__ ) > max_length:
__lowercase = toks[:max_length]
if min_length is not None and len(lowerCAmelCase__ ) < min_length and len(lowerCAmelCase__ ) > 0:
while len(lowerCAmelCase__ ) < min_length:
__lowercase = toks + toks
# toks_str = [t[1] for t in toks]
__lowercase = [t[0] for t in toks]
# Ensure consistency
__lowercase = tokenizer.decode(lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
if " " not in output_txt and len(lowerCAmelCase__ ) > 1:
__lowercase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase__ )
)
if with_prefix_space:
__lowercase = ''' ''' + output_txt
__lowercase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
return output_txt, output_ids
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
__lowercase = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = '''Unicode €.'''
__lowercase = tokenizer(lowerCAmelCase__ )
__lowercase = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase__ )
# decoding
__lowercase = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''Unicode €.</s>''' )
__lowercase = tokenizer('''e è é ê ë''' )
__lowercase = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase__ )
# decoding
__lowercase = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
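        # ByT5 has no learned vocabulary: each UTF-8 byte b maps to id b + 3 (ids 0-2
        # are reserved for pad/eos/unk), e.g. "U" (byte 85) encodes to 88 in the
        # integration test above.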
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__lowercase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
__lowercase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
if FRAMEWORK != "jax":
__lowercase = list(batch.input_ids.numpy()[0] )
else:
__lowercase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__lowercase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCAmelCase__ )
self.assertIn('''attention_mask''' , lowerCAmelCase__ )
self.assertNotIn('''decoder_input_ids''' , lowerCAmelCase__ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = [
'''Summary of the text.''',
'''Another summary.''',
]
__lowercase = tokenizer(
text_target=lowerCAmelCase__ , max_length=32 , padding='''max_length''' , truncation=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = ['''A long paragraph for summarization. </s>''']
__lowercase = ['''Summary of the text. </s>''']
# fmt: off
__lowercase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
__lowercase = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
__lowercase = tokenizer(lowerCAmelCase__ , text_target=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , batch['''input_ids'''][0] )
self.assertEqual(lowerCAmelCase__ , batch['''labels'''][0] )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
__lowercase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__lowercase = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
shutil.rmtree(lowerCAmelCase__ )
__lowercase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__lowercase = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__lowercase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__lowercase = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowercase = tokenizer.__class__.from_pretrained(lowerCAmelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__lowercase = json.load(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__lowercase = json.load(lowerCAmelCase__ )
__lowercase = [F"<extra_id_{i}>" for i in range(1_25 )]
__lowercase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
__lowercase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCAmelCase__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowercase = tokenizer_class.from_pretrained(
lowerCAmelCase__ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowercase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCAmelCase__ )]
__lowercase = tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_class.from_pretrained(lowerCAmelCase__ )
self.assertTrue(tokenizer.decode([2_55] ) == '''''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_tokenizers(fast=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
__lowercase = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__lowercase = 0
__lowercase = tokenizer.convert_ids_to_tokens(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
for attr in attributes_list:
setattr(lowerCAmelCase__ , attr + '''_id''' , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + '''_id''' ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , attr + '''_id''' , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + '''_id''' ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , '''additional_special_tokens_ids''' ) , [] )
setattr(lowerCAmelCase__ , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase__ , '''additional_special_tokens''' ) , [token_to_test_setters] )
                    self.assertListEqual(getattr(lowerCAmelCase__ , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
| 210 | 0 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
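# Example invocation (paths are hypothetical placeholders; see the argparse block at
# the bottom of this script for the actual flags):
#
#   python <this_script>.py \
#       --fsmt_checkpoint_path /path/to/wmt19.ru-en/model4.pt \
#       --pytorch_dump_folder_path /path/to/wmt19-ru-en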
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase__ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.1_5},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = '''allenai'''
def rewrite_dict_keys(d):
    # (1) remove the word-breaking "@@" suffix, (2) add the word-end "</w>" marker to all other keys
    da = dict((re.sub(R'''@@$''', '''''', k), v) if k.endswith('''@@''' ) else (re.sub(R'''$''', '''</w>''', k), v) for k, v in d.items() )
    keep_keys = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'''{k}</w>''']
        da[k] = d[k]  # restore
    return da
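# Illustrative example (hypothetical vocab entries): {"hel@@": 5, "lo": 6, "</s>": 2}
# becomes {"hel": 5, "lo</w>": 6, "</s>": 2} -- "@@" continuation markers are stripped,
# word-final tokens gain "</w>", and the special tokens are restored verbatim.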
def convert_fsmt_checkpoint_to_pytorch( fsmt_checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    assert os.path.exists(fsmt_checkpoint_path )
    os.makedirs(pytorch_dump_folder_path, exist_ok=True )
    print(F'''Writing results to {pytorch_dump_folder_path}''' )
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path )
    fsmt_folder_path = dirname(fsmt_checkpoint_path )
_lowerCamelCase : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
_lowerCamelCase : Any = cls.hub_models()
_lowerCamelCase : Optional[Any] = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
_lowerCamelCase : int = '''.'''
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
_lowerCamelCase : Dict = hub_utils.from_pretrained(
A_, A_, A_, archive_map=A_, **A_ )
_lowerCamelCase : Optional[Any] = vars(chkpt['''args''']['''model'''] )
_lowerCamelCase : List[Any] = args['''source_lang''']
_lowerCamelCase : Dict = args['''target_lang''']
_lowerCamelCase : int = dirname(A_ )
_lowerCamelCase : str = basename(A_ )
# dicts
_lowerCamelCase : Union[str, Any] = os.path.join(A_, F'''dict.{src_lang}.txt''' )
_lowerCamelCase : Dict = os.path.join(A_, F'''dict.{tgt_lang}.txt''' )
_lowerCamelCase : Union[str, Any] = Dictionary.load(A_ )
_lowerCamelCase : Any = rewrite_dict_keys(src_dict.indices )
_lowerCamelCase : int = len(A_ )
_lowerCamelCase : Optional[int] = os.path.join(A_, '''vocab-src.json''' )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(A_, ensure_ascii=A_, indent=A_ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
_lowerCamelCase : List[str] = True
for k in src_vocab.keys():
if not k.islower():
_lowerCamelCase : Tuple = False
break
_lowerCamelCase : Optional[int] = Dictionary.load(A_ )
_lowerCamelCase : Optional[Any] = rewrite_dict_keys(tgt_dict.indices )
_lowerCamelCase : Optional[Any] = len(A_ )
_lowerCamelCase : int = os.path.join(A_, '''vocab-tgt.json''' )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(A_, ensure_ascii=A_, indent=A_ ) )
# merges_file (bpecodes)
_lowerCamelCase : Optional[Any] = os.path.join(A_, VOCAB_FILES_NAMES['''merges_file'''] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
_lowerCamelCase : List[str] = os.path.join(A_, A_ )
if os.path.exists(A_ ):
break
with open(A_, encoding='''utf-8''' ) as fin:
_lowerCamelCase : Union[str, Any] = fin.read()
_lowerCamelCase : Optional[Any] = re.sub(R''' \d+$''', '''''', A_, 0, re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(A_, '''w''', encoding='''utf-8''' ) as fout:
fout.write(A_ )
# model config
_lowerCamelCase : str = os.path.join(A_, '''config.json''' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
_lowerCamelCase : Optional[int] = {
'''architectures''': ['''FSMTForConditionalGeneration'''],
'''model_type''': '''fsmt''',
'''activation_dropout''': args['''activation_dropout'''],
'''activation_function''': '''relu''',
'''attention_dropout''': args['''attention_dropout'''],
'''d_model''': args['''decoder_embed_dim'''],
'''dropout''': args['''dropout'''],
'''init_std''': 0.02,
'''max_position_embeddings''': args['''max_source_positions'''],
'''num_hidden_layers''': args['''encoder_layers'''],
'''src_vocab_size''': src_vocab_size,
'''tgt_vocab_size''': tgt_vocab_size,
'''langs''': [src_lang, tgt_lang],
'''encoder_attention_heads''': args['''encoder_attention_heads'''],
'''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
'''encoder_layerdrop''': args['''encoder_layerdrop'''],
'''encoder_layers''': args['''encoder_layers'''],
'''decoder_attention_heads''': args['''decoder_attention_heads'''],
'''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
'''decoder_layerdrop''': args['''decoder_layerdrop'''],
'''decoder_layers''': args['''decoder_layers'''],
'''bos_token_id''': 0,
'''pad_token_id''': 1,
'''eos_token_id''': 2,
'''is_encoder_decoder''': True,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_all_embeddings'''],
}
# good hparam defaults to start with
_lowerCamelCase : Tuple = 5
_lowerCamelCase : Dict = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
_lowerCamelCase : Any = best_score_hparams[model_dir]['''length_penalty''']
else:
_lowerCamelCase : Optional[int] = 1.0
print(F'''Generating {fsmt_model_config_file}''' )
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(A_, ensure_ascii=A_, indent=A_ ) )
# tokenizer config
_lowerCamelCase : Dict = os.path.join(A_, A_ )
_lowerCamelCase : List[str] = {
'''langs''': [src_lang, tgt_lang],
'''model_max_length''': 10_24,
'''do_lower_case''': do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''' )
with open(A_, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(A_, ensure_ascii=A_, indent=A_ ) )
# model
_lowerCamelCase : str = chkpt['''models'''][0]
_lowerCamelCase : List[Any] = model.state_dict()
# rename keys to start with 'model.'
_lowerCamelCase : str = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
_lowerCamelCase : str = [
'''model.model''',
'''model.encoder.version''',
'''model.decoder.version''',
'''model.encoder_embed_tokens.weight''',
'''model.decoder_embed_tokens.weight''',
'''model.encoder.embed_positions._float_tensor''',
'''model.decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
model_state_dict.pop(A_, A_ )
_lowerCamelCase : int = FSMTConfig.from_pretrained(A_ )
_lowerCamelCase : List[str] = FSMTForConditionalGeneration(A_ )
# check that it loads ok
model_new.load_state_dict(A_, strict=A_ )
# save
_lowerCamelCase : Any = os.path.join(A_, A_ )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(A_, A_ )
print('''Conversion is done!''' )
print('''\nLast step is to upload the files to s3''' )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 175 |
"""simple docstring"""
import math
def snake_case_ ( A_ : int ):
'''simple docstring'''
return math.sqrt(A_ ) * math.sqrt(A_ ) == num
def snake_case_ ( A_ : int ):
'''simple docstring'''
_lowerCamelCase : Dict = 0
_lowerCamelCase : Tuple = n
while left <= right:
_lowerCamelCase : List[str] = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_lowerCamelCase : int = mid - 1
else:
_lowerCamelCase : Optional[Any] = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
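    # Quick manual sanity checks (added for illustration; not in the original module):
    assert perfect_square_binary_search(36) is True
    assert perfect_square_binary_search(35) is False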
| 175 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase_ = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'albert'

    def __init__( self , vocab_size=30_000 , embedding_size=128 , hidden_size=4_096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16_384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
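# Usage sketch (assumed workflow, not part of this file): the `transformers.onnx`
# export CLI reads the `inputs` mapping above to declare dynamic axes, e.g.
#
#   python -m transformers.onnx --model=albert-base-v2 onnx/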
| 45 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , num_vector_embeds = None , activation_fn = "geglu" , num_embeds_ada_norm = None , ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
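        # Routing example (derived from the defaults above): with condition_lengths
        # [77, 257] and transformer_index_for_condition [1, 0], forward() sends
        # encoder_hidden_states[:, :77] to transformers[1] and
        # encoder_hidden_states[:, 77:334] to transformers[0].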
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict = True , ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states )
| 338 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : str = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = 'levit'

    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1e-4
| 338 | 1 |
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition around a[left_index]; returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        # switch the pivot with the left-most bound
        a[pivot], a[left] = a[left], a[pivot]
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
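# Note (added): the random pivot gives an expected O(n log n) running time on any input
# ordering; the O(n^2) worst case still exists but is no longer triggered by a fixed
# (e.g. already-sorted) input.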
| 138 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ : Dict = name.replace('''img_encoder.pos_embed''' ,'''vision_model.embeddings.position_embeddings''')
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ : int = name.replace('''img_encoder.patch_embed.proj''' ,'''vision_model.embeddings.patch_embeddings.projection''')
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ : Optional[int] = name.replace('''img_encoder.patch_embed.norm''' ,'''vision_model.embeddings.layernorm''')
if "img_encoder.layers" in name:
lowerCAmelCase__ : Tuple = name.replace('''img_encoder.layers''' ,'''vision_model.encoder.stages''')
if "blocks" in name and "res" not in name:
lowerCAmelCase__ : Dict = name.replace('''blocks''' ,'''layers''')
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ : Optional[int] = name.replace('''attn''' ,'''self_attn''')
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('''proj''' ,'''out_proj''')
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ : List[Any] = name.replace('''pre_assign_attn.attn.proj''' ,'''pre_assign_attn.attn.out_proj''')
if "norm1" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('''norm1''' ,'''layer_norm1''')
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ : int = name.replace('''norm2''' ,'''layer_norm2''')
if "img_encoder.norm" in name:
lowerCAmelCase__ : List[Any] = name.replace('''img_encoder.norm''' ,'''vision_model.layernorm''')
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ : List[Any] = name.replace('''text_encoder.token_embedding''' ,'''text_model.embeddings.token_embedding''')
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ : Tuple = name.replace('''text_encoder.positional_embedding''' ,'''text_model.embeddings.position_embedding.weight''')
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('''text_encoder.transformer.resblocks.''' ,'''text_model.encoder.layers.''')
if "ln_1" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('''ln_1''' ,'''layer_norm1''')
if "ln_2" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('''ln_2''' ,'''layer_norm2''')
if "c_fc" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('''c_fc''' ,'''fc1''')
if "c_proj" in name:
lowerCAmelCase__ : List[str] = name.replace('''c_proj''' ,'''fc2''')
if "text_encoder" in name:
lowerCAmelCase__ : str = name.replace('''text_encoder''' ,'''text_model''')
if "ln_final" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('''ln_final''' ,'''final_layer_norm''')
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ : Tuple = name.replace('''img_projector.linear_hidden.''' ,'''visual_projection.''')
if "img_projector.linear_out." in name:
lowerCAmelCase__ : Optional[Any] = name.replace('''img_projector.linear_out.''' ,'''visual_projection.3.''')
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ : Tuple = name.replace('''text_projector.linear_hidden''' ,'''text_projection''')
if "text_projector.linear_out" in name:
lowerCAmelCase__ : Dict = name.replace('''text_projector.linear_out''' ,'''text_projection.3''')
return name
def convert_state_dict(orig_state_dict, config):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ : List[str] = orig_state_dict.pop(lowerCamelCase_)
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
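            # e.g. a fused (3 * dim, dim) qkv weight splits into three (dim, dim)
            # matrices: q = val[:dim], k = val[dim : 2 * dim], v = val[-dim:]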
lowerCAmelCase__ : Tuple = key.split('''.''')
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = int(key_split[2]), int(key_split[4])
lowerCAmelCase__ : Any = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ : Tuple = val[:dim, :]
lowerCAmelCase__ : Dict = val[dim : dim * 2, :]
lowerCAmelCase__ : List[str] = val[-dim:, :]
else:
lowerCAmelCase__ : List[Any] = val[:dim]
lowerCAmelCase__ : List[str] = val[dim : dim * 2]
lowerCAmelCase__ : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ : Dict = key.split('''.''')
lowerCAmelCase__ : List[str] = int(key_split[3])
lowerCAmelCase__ : Any = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ : Tuple = val[:dim, :]
lowerCAmelCase__ : Union[str, Any] = val[
dim : dim * 2, :
]
lowerCAmelCase__ : List[Any] = val[-dim:, :]
else:
lowerCAmelCase__ : Union[str, Any] = val[:dim]
lowerCAmelCase__ : List[str] = val[dim : dim * 2]
lowerCAmelCase__ : str = val[-dim:]
else:
lowerCAmelCase__ : int = rename_key(lowerCamelCase_)
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ : Dict = val.squeeze_()
else:
lowerCAmelCase__ : Tuple = val
return orig_state_dict
def prepare_img():
'''simple docstring'''
lowerCAmelCase__ : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCAmelCase__ : str = Image.open(requests.get(lowerCamelCase_ ,stream=lowerCamelCase_).raw)
return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
'''simple docstring'''
lowerCAmelCase__ : Dict = GroupViTConfig()
lowerCAmelCase__ : Dict = GroupViTModel(lowerCamelCase_).eval()
lowerCAmelCase__ : Optional[int] = torch.load(lowerCamelCase_ ,map_location='''cpu''')['''model''']
lowerCAmelCase__ : List[Any] = convert_state_dict(lowerCamelCase_ ,lowerCamelCase_)
lowerCAmelCase__ , lowerCAmelCase__ : Any = model.load_state_dict(lowerCamelCase_ ,strict=lowerCamelCase_)
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCamelCase_) == 0)
# verify result
lowerCAmelCase__ : Optional[Any] = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
lowerCAmelCase__ : Tuple = prepare_img()
lowerCAmelCase__ : Dict = processor(text=['''a photo of a cat''', '''a photo of a dog'''] ,images=lowerCamelCase_ ,padding=lowerCamelCase_ ,return_tensors='''pt''')
with torch.no_grad():
lowerCAmelCase__ : str = model(**lowerCamelCase_)
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[13.3523, 6.3629]])
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ : Tuple = torch.tensor([[16.1873, 8.6230]])
else:
raise ValueError(f"""Model name {model_name} not supported.""")
assert torch.allclose(outputs.logits_per_image ,lowerCamelCase_ ,atol=1E-3)
processor.save_pretrained(lowerCamelCase_)
model.save_pretrained(lowerCamelCase_)
print('''Successfully saved processor and model to''' ,lowerCamelCase_)
if push_to_hub:
print('''Pushing to the hub...''')
processor.push_to_hub(lowerCamelCase_ ,organization='''nielsr''')
model.push_to_hub(lowerCamelCase_ ,organization='''nielsr''')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 129 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    '''simple docstring'''
    _backends = ['torch', 'torchsde']

    def __init__(self , *args , **kwargs):
        requires_backends(self , ['''torch''', '''torchsde'''] )

    @classmethod
    def from_config(cls , *args , **kwargs):
        requires_backends(cls , ['''torch''', '''torchsde'''] )

    @classmethod
    def from_pretrained(cls , *args , **kwargs):
        requires_backends(cls , ['''torch''', '''torchsde'''] )
| 73 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = '''src/diffusers'''
REPO_PATH = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    # keep consuming lines while they are indented under the object, empty, or the
    # tail of a multi-line signature (`) -> ...:`)
    return line.startswith(indent) or len(line) <= 1 or re.search(r'''^\s*\)(\s*->.*:|:)\s*$''' , line) is not None
def find_code_in_diffusers(object_name):
_lowerCamelCase = object_name.split('''.''' )
_lowerCamelCase = 0
# First let's find the module where our object lives.
_lowerCamelCase = parts[i]
while i < len(lowercase_ ) and not os.path.isfile(os.path.join(lowercase_ , F"""{module}.py""" ) ):
i += 1
if i < len(lowercase_ ):
_lowerCamelCase = os.path.join(lowercase_ , parts[i] )
if i >= len(lowercase_ ):
raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(lowercase_ , F"""{module}.py""" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_lowerCamelCase = f.readlines()
# Now let's find the class / func in the code!
_lowerCamelCase = ''''''
_lowerCamelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowercase_ ) and re.search(rF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowercase_ ):
raise ValueError(F""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_lowerCamelCase = line_index
while line_index < len(lowercase_ ) and _should_continue(lines[line_index] , lowercase_ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCamelCase = lines[start_index:line_index]
return "".join(lowercase_ )
_re_copy_warning = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_re_replace_pattern = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_re_fill_pattern = re.compile(R'''<FILL\s+[^>]*>''')
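# Example of the annotation these regexes parse (illustrative):
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# _re_copy_warning captures the indent and the source object path; _re_replace_pattern
# captures each `old->new` rewrite, optionally followed by the `all-casing` option.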
def get_indent(code):
_lowerCamelCase = code.split('''\n''' )
_lowerCamelCase = 0
while idx < len(lowercase_ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowercase_ ):
return re.search(r'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def blackify(code):
_lowerCamelCase = len(get_indent(lowercase_ ) ) > 0
if has_indent:
_lowerCamelCase = F"""class Bla:\n{code}"""
_lowerCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=lowercase_ )
_lowerCamelCase = black.format_str(lowercase_ , mode=lowercase_ )
_lowerCamelCase , _lowerCamelCase = style_docstrings_in_code(lowercase_ )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line.
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
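# For reference, the kind of comment this script validates looks like (illustrative example):
#
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.step with DDPM->DDIM
#
# The code under such a comment must match the referenced object byte-for-byte after the
# `Foo->Bar` replacements (and their `all-casing` variants) have been applied.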
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 73 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is kept out of ClassVar so it is included when the template is serialized with `asdict`.
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
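# A minimal usage sketch (the dataset and column names below are illustrative):
#
#     from datasets import load_dataset
#
#     task = QuestionAnsweringExtractive(question_column="question", context_column="context")
#     squad = load_dataset("squad", split="train")
#     squad = squad.prepare_for_task(task)  # renames/casts columns to the task schema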
| 28 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # SORTED_HANDS is ordered weakest -> strongest, so comparing the two indices gives the
    # expected result: (play >= oppo) + (play > oppo) maps to 0 (Loss), 1 (Tie) or 2 (Win).
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler.
    # Testing from the poker_hands.txt file shipped next to this test.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 28 | 1 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
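# For context, a minimal sketch of the `send_file` function this test exercises
# (illustrative only -- the real implementation lives in file_transfer/send_file.py):
#
#     import socket
#
#     def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
#         port = 12312  # arbitrary non-privileged port, assumed for this sketch
#         sock = socket.socket()
#         sock.bind(("localhost", port))
#         sock.listen(5)
#         conn, addr = sock.accept()
#         conn.recv(1024)  # wait for the client's request
#         with open(filename, "rb") as in_file:
#             data = in_file.read(1024)
#             while data:
#                 conn.send(data)
#                 data = in_file.read(1024)
#         conn.close()
#         sock.shutdown(socket.SHUT_RDWR)
#         sock.close()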
| 85 | """simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
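# A minimal usage sketch (assuming a CUDA toolchain is available for the JIT build):
#
#     MSDA = load_cuda_kernels()
#     # The compiled extension then backs the multi-scale deformable attention op,
#     # e.g. MSDA.ms_deform_attn_forward(...) inside DeformableDetrAttention.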
| 85 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"

        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy", ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 175 | import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be directly called; moves the cursor up or down."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Start the menu and return the result."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
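# A minimal usage sketch (run in a real terminal, since keys are read raw; the choices are
# hypothetical):
#
#     menu = BulletMenu(prompt="Pick a compute environment:", choices=["This machine", "AWS"])
#     selected = menu.run(default_choice=0)  # returns the chosen index, e.g. 0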
| 175 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
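# With the `_LazyModule` pattern the module itself stays cheap to import: the
# sentencepiece-backed tokenizer module is only loaded on first attribute access, e.g. (sketch):
#
#     from transformers.models.gpt_sw3 import GPTSw3Tokenizer  # resolving the attribute
#                                                              # triggers the real import here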
| 334 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 338 | import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->Tuple:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase , lowerCAmelCase = sample, sample
for t in range(__SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
pass
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE ) ->List[Any]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = new_scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when none is passed in (the original listing also
        # rebuilt it unconditionally, which would have ignored the `scheduler` argument).
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = dict(self.forward_default_kwargs )
lowerCAmelCase = kwargs.pop('''num_inference_steps''' , __SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_sample
lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(__SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase = scheduler.timesteps[5]
lowerCAmelCase = scheduler.timesteps[6]
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , algorithm_type='''deis''' , solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = self.full_loop(
solver_order=__SCREAMING_SNAKE_CASE , solver_type=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , algorithm_type=__SCREAMING_SNAKE_CASE , )
assert not torch.isnan(__SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__SCREAMING_SNAKE_CASE , time_step=0 )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.full_loop()
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
lowerCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
lowerCAmelCase = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
lowerCAmelCase = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowerCAmelCase = 10
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter.half()
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
| 338 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
def snake_case ( self : List[str] ):
pass
def snake_case ( self : List[Any] ):
lowercase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
lowercase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : int = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
lowercase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : Tuple = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case ( self : List[Any] ):
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
lowercase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : List[Any] = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
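# A hedged usage sketch of the processor under test (the checkpoint name is an
# assumption, not taken from this file):
#
#   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#   pixel_values = processor(images, return_tensors="pt").pixel_values
#   # with the defaults exercised above, pixel_values has shape (batch, 3, 18, 18)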
| 367 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """simple docstring"""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
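# Minimal sketch of calling `convert` directly (the file names are hypothetical; any
# torch-saved state dict works):
#
#   convert("pytorch_model.bin", map_location="cpu", save_path="pytorch_model.fp16.bin")
#
# or via the fire CLI entry point above (script name assumed):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin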
| 121 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
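# For reference, a direct call mirroring the fixtures above (network access assumed):
#
#   get_dataset_split_names("squad", config_name="plain_text")  # -> ["train", "validation"]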
| 73 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
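# Sketch of how test_patching.py is expected to exercise this module (the exact
# signature of patch_submodule is an assumption):
#
#   with patch_submodule(_test_patching, "os.path.join", mock_join):
#       ...  # inside the block, `join` and `os.path.join` above resolve to mock_join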
| 73 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy")
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 354 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
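# These sets are consumed by the pipeline test mixins: a test class for, say, a
# text-to-audio pipeline would typically set `params = TEXT_TO_AUDIO_PARAMS` and
# `batch_params = TEXT_TO_AUDIO_BATCH_PARAMS` (attribute names assumed from the mixin).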
| 226 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
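# With the lazy module installed in sys.modules, `from ...swin import SwinModel` defers
# the heavy torch/tensorflow import until the attribute is first accessed (behavior
# sketch of _LazyModule, not verified here).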
| 85 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable):
    '''simple docstring'''
    @wraps(fn)
    def _inner_fn(*args: Optional[int], **kwargs: List[Any]):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."), UserWarning, )
        return fn(*args, **kwargs)
    return _inner_fn
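# Minimal usage sketch (the decorated function is hypothetical):
#
#   @experimental
#   def new_feature():
#       ...
#
# Calling new_feature() first emits a UserWarning, then runs the wrapped function.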
| 85 | 1 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    '''simple docstring'''
    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('''unreadable attribute''')
        attr = '''__cached_''' + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F'invalid truth value {val!r}')
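# e.g. strtobool("Yes") == 1 and strtobool("OFF") == 0, while strtobool("maybe")
# raises ValueError.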
def is_tensor(x):
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch
        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)
def is_numpy_array(x):
    return _is_numpy(x)
def _is_torch(x):
    import torch
    return isinstance(x, torch.Tensor)
def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)
def _is_torch_device(x):
    import torch
    return isinstance(x, torch.device)
def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)
def _is_torch_dtype(x):
    import torch
    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)
def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)
def _is_tensorflow(x):
    import tensorflow as tf
    return isinstance(x, tf.Tensor)
def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)
def _is_tf_symbolic_tensor(x):
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, '''is_symbolic_tensor'''):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor
def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)
def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811
    return isinstance(x, jnp.ndarray)
def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    '''simple docstring'''
    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f'{self.__class__.__name__} has no fields.')
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f'{self.__class__.__name__} should not have more than one required field.')
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'Cannot set key/value for {element}. It needs to be a tuple (key, value).')
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.')
    def setdefault(self, *args, **kwargs):
        raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.')
    def pop(self, *args, **kwargs):
        raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.')
    def update(self, *args, **kwargs):
        raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.')
    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)
    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)
    def to_tuple(self) -> Tuple[Any]:
        return tuple(self[k] for k in self.keys())
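# Minimal usage sketch of ModelOutput (the subclass and its fields are made up
# for illustration):
#
#   @dataclass
#   class SampleOutput(ModelOutput):
#       loss: Optional[Any] = None
#       logits: Any = None
#
#   out = SampleOutput(logits=1.0)
#   assert out.logits == out["logits"]  # attribute and key access stay in sync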
class ExplicitEnum(str, Enum):
    '''simple docstring'''
    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}')
class PaddingStrategy(ExplicitEnum):
    '''simple docstring'''
    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''
class TensorType(ExplicitEnum):
    '''simple docstring'''
    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''
class ContextManagers:
    '''simple docstring'''
    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)
    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v
    return dict(_flatten_dict(d, parent_key, delimiter))
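# e.g. flatten_dict({"a": 1, "b": {"c": 2}}) == {"a": 1, "b.c": 2}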
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(F'Type not supported for transpose: {type(array)}.')
def reshape(array, newshape):
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(F'Type not supported for reshape: {type(array)}.')
def squeeze(array, axis=None):
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(F'Type not supported for squeeze: {type(array)}.')
def expand_dims(array, axis):
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(F'Type not supported for expand_dims: {type(array)}.')
def tensor_size(array):
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf
        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(F'Type not supported for tensor_size: {type(array)}.')
def add_model_info_to_auto_map(auto_map, repo_id):
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [F'{repo_id}--{v}' if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F'{repo_id}--{value}'
    return auto_map
def infer_framework(model_class):
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''') or module.startswith('''keras''') or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''') or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''') or module.startswith('''jax''') or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F'Could not infer framework from class {model_class}.')
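# Hedged usage sketch of the framework-agnostic helpers above (numpy shown; the same
# calls dispatch on torch/tf/jax tensors):
#
#   x = np.zeros((2, 3))
#   transpose(x).shape         # (3, 2)
#   reshape(x, (3, 2)).shape   # (3, 2)
#   expand_dims(x, 0).shape    # (1, 2, 3)
#   tensor_size(x)             # 6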
| 68 |
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
_lowerCAmelCase :int = get_logger(__name__)
class VerificationMode(enum.Enum):
    '''simple docstring'''
    ALL_CHECKS = '''all_checks'''
    BASIC_CHECKS = '''basic_checks'''
    NO_CHECKS = '''no_checks'''
class ChecksumVerificationException(Exception):
    '''simple docstring'''
class UnexpectedDownloadedFile(ChecksumVerificationException):
    '''simple docstring'''
class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    '''simple docstring'''
class NonMatchingChecksumError(ChecksumVerificationException):
    '''simple docstring'''
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info('''Unable to verify checksums.''')
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ''' for ''' + verification_name if verification_name is not None else ''''''
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            F'Checksums didn\'t match{for_verification_name}:\n'
            F'{bad_urls}\n'
            '''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''')
    logger.info('''All the checksums matched successfully''' + for_verification_name)
class SplitsVerificationException(Exception):
    '''simple docstring'''
class UnexpectedSplits(SplitsVerificationException):
    '''simple docstring'''
class ExpectedMoreSplits(SplitsVerificationException):
    '''simple docstring'''
class NonMatchingSplitsSizesError(SplitsVerificationException):
    '''simple docstring'''
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info('''Unable to verify splits sizes.''')
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info('''All the splits matched successfully.''')
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, '''rb''') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''''''):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
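# e.g. get_size_checksum_dict("some_file.bin") returns a dict like
# {"num_bytes": <size in bytes>, "checksum": "<sha256 hex digest>"} (the path is
# hypothetical; checksumming streams the file in 1 MiB chunks).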
| 68 | 1 |
def prefix_function(input_string: str) -> list:
    """simple docstring"""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix(input_str: str) -> int:
    """simple docstring"""
    return max(prefix_function(input_str))
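# Worked example: prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3], so
# longest_prefix("aabaaab") == 3 -- the longest proper prefix that is also a suffix.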
if __name__ == "__main__":
import doctest
doctest.testmod()
| 334 |
def reverse_long_words(sentence: str) -> str:
    """simple docstring"""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
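# Expected output: "Hey fellow warriors" -- only words longer than 4 characters are reversed.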
| 334 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''whisper'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__(self, vocab_size=5_18_65, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=15_36, encoder_ffn_dim=15_36, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=5_02_57, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=2_56, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=15_00, max_target_positions=4_48, pad_token_id=5_02_56, bos_token_id=5_02_56, eos_token_id=5_02_56, suppress_tokens=None, begin_suppress_tokens=[2_20, 5_02_56], use_weighted_layer_sum=False, classifier_proj_size=2_56, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    '''simple docstring'''
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
            ])
        if self.use_past:
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
        return common_inputs
    def generate_dummy_inputs(self, preprocessor, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework=None, sampling_rate: int = 2_20_50, time_duration: float = 5.0, frequency: int = 2_20, ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs['''input_features'''].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs['''input_features'''] = encoder_inputs.pop('''input_features''')
        dummy_inputs['''decoder_input_ids'''] = decoder_inputs.pop('''decoder_input_ids''')
        if "past_key_values" in decoder_inputs:
            dummy_inputs['''past_key_values'''] = decoder_inputs.pop('''past_key_values''')
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1E-3
| 24 |
"""simple docstring"""
def solution() -> int:
"""simple docstring"""
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
        for b in range(a, 999)
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
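# The unique triplet is (a, b, c) = (200, 375, 425): 200 + 375 + 425 == 1_000 and
# 200**2 + 375**2 == 425**2, giving the product 200 * 375 * 425 == 31_875_000.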
if __name__ == "__main__":
print(F'{solution() = }')
| 24 | 1 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims=1_28, targets_length=2_56, max_decoder_noise_time=2000.0, d_model=7_68, num_layers=12, num_heads=12, d_kv=64, d_ff=20_48, dropout_rate=0.1, ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()
    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
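# Shape sketch of the FiLM conditioning above: for x of shape (batch, seq, d_model) and
# conditioning_emb of shape (batch, 1, d_model * 4), scale_bias projects the embedding
# to 2 * d_model features, which are split into a per-channel scale and shift for x.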
| 80 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''')
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''---\ndataset_info:\n  dataset_size: 42\n---''')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''', '''w''') as f:
            f.write('''''')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''', '''w''') as f:
            f.write('''{"default": {"dataset_size": 42}}''')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path_str = str(tmp_path)
    dataset_info.write_to_directory(tmp_path_str)
    reloaded = DatasetInfo.from_directory(tmp_path_str)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path_str, '''dataset_info.json'''))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path_str = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path_str)
    reloaded = DatasetInfosDict.from_directory(tmp_path_str)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path_str, '''README.md'''))
| 121 | 0 |
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)
    def __init__(self, encrypt_key) -> None:
        """simple docstring"""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        """simple docstring"""
        return self.key_string.index(letter)
    def replace_digits(self, num: int) -> str:
        """simple docstring"""
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        """simple docstring"""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f'''determinant modular {req_l} of encryption key({det}) '''
                f'''is not co prime w.r.t {req_l}.\nTry another key.'''
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        """simple docstring"""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        """simple docstring"""
        text = self.process_text(text.upper())
        encrypted = """"""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = """""".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self):
        """simple docstring"""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        """simple docstring"""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = """"""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = """""".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 368 |
"""simple docstring"""
lowerCAmelCase_ : Dict = {str(digit): digit**5 for digit in range(1_0)}
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCAmelCase ) )
def _lowerCAmelCase ( ):
'''simple docstring'''
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(lowerCAmelCase ) )
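# Worked example: 4150 = 4**5 + 1**5 + 5**5 (1024 + 1 + 3125), so
# digits_fifth_powers_sum(4150) == 4150 and 4150 is counted by solution().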
if __name__ == "__main__":
print(solution())
| 248 | 0 |
'''Flip augmentation for YOLO-format object-detection datasets.'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
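# YOLO label lines are "class x_center y_center width height" with coordinates
# normalised to [0, 1], which is why flipping below only needs 1 - x (or 1 - y).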
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror x_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror y_center
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''') | 331 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size: int, sigma: float):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
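# Note: the textbook 2-D Gaussian normaliser is 1 / (2 * pi * sigma**2), and this
# kernel is not re-normalised to sum to 1, so output brightness varies with sigma
# and k_size; that matches the original implementation and is kept as-is here.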
def gaussian_filter(image, k_size: int, sigma: float):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
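# The im2col step above turns the whole convolution into a single matrix-vector
# product: one dot() over (dst_height * dst_width, k*k) windows replaces an explicit
# Python loop over every output pixel and kernel tap.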
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 226 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
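# Typical composition (illustrative): a prompt is chained with a converter, e.g.
# _ask_options("Which dynamo backend?", DYNAMO_BACKENDS, _convert_dynamo_backend),
# so the raw menu index is mapped straight to the corresponding enum value.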
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from subcommand help."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 338 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
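# prepare_mbart_inputs_dict fills in any mask the caller omitted: padding tokens are
# masked out of attention, the first decoder position stays attendable, and the head
# masks default to all-ones ("keep every head").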
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 338 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
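# The tester above follows the standard transformers pattern: prepare_config_and_inputs
# builds a tiny random config plus matching tensors, and each create_and_check_* method
# only asserts output shapes, so the whole suite runs quickly on CPU.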
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 68 |
def hamming(n_element: int) -> list:
    """Return the first n_element Hamming numbers (products of 2, 3 and 5, in order)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
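# Example: hamming(10) -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]. The pointers i, j and k
# each track the smallest element whose product with 2, 3 or 5 has not been used yet,
# so every candidate is generated exactly once and in ascending order.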
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
print("""-----------------------------------------------------""")
| 68 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
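# Note: test_ddp_kwargs re-launches this very file under torchrun, so the __main__
# block below is what actually runs on each GPU and performs the DDP kwargs checks.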
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 132 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
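# padding_tensor handles two layouts: a scalar padding_value pads 2-D batches (one
# value per position, e.g. ner_tags), while a tuple pads 3-D batches of
# (start, end) entity spans.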
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels, as they are not all the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 132 | 1 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
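    # Sherman-Morrison formula: (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u).
    # Here `self` already holds A^-1, so the rank-one update never computes a full inverse.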
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 24 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
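# Note: the coordinate-swapping loop in prepare_config_and_inputs guarantees every
# random box satisfies x0 <= x1 and y0 <= y1, which LiLT's layout embedding expects.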
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 24 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
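# forward() mean-pools the token embeddings with the attention mask as weights before
# the linear projection, so padding tokens never contribute to the text vector.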
| 355 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
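# Only the names registered in _import_structure are known up front; the actual
# modules are loaded lazily, on first attribute access, by the _LazyModule below.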
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 225 | 0 |
'''Entry point for the `datasets-cli` command-line tool.'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
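# Each command's register_subcommand() adds a subparser and (via set_defaults) binds a
# factory as `func`, so `args.func(args, **kwargs)` instantiates the chosen command.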
if __name__ == "__main__":
main()
| 34 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
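    # The tiny vocab/merges written in setUp mimic the byte-level BPE files a
    # RoBERTa-style tokenizer expects; "\u0120" (Ġ) encodes a leading space.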
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 248 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
)
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
A_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
A_ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ = bbox[i, j, 3]
A_ = bbox[i, j, 1]
A_ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ = bbox[i, j, 2]
A_ = bbox[i, j, 0]
A_ = tmp_coordinate
A_ = tf.constant(UpperCAmelCase )
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.text_seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
A_ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __A ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : str ):
A_ = TFLayoutLMvaModel(config=UpperCAmelCase )
# text + image
A_ = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , )
A_ = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
A_ = model(UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
A_ = model({"pixel_values": pixel_values} , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __A ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ):
A_ = self.num_labels
A_ = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase )
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] ):
A_ = self.num_labels
A_ = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase )
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __A ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] ):
A_ = 2
A_ = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase )
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[Any] ):
A_ = self.prepare_config_and_inputs()
((A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = config_and_inputs
A_ = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Optional[int] = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_lowerCamelCase : Any = False
_lowerCamelCase : str = False
_lowerCamelCase : str = False
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int ):
return True
def __A ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Tuple=False ):
A_ = copy.deepcopy(UpperCAmelCase )
if model_class in get_values(UpperCAmelCase ):
A_ = {
k: tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase ):
A_ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
A_ = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __A ( self : Union[str, Any] ):
A_ = TFLayoutLMvaModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : int ):
self.config_tester.run_common_tests()
def __A ( self : Optional[int] ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCAmelCase )
if getattr(UpperCAmelCase , "hf_compute_loss" , UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
A_ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0]
]
A_ = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
A_ = prepared_for_class.pop("input_ids" )
A_ = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
A_ = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
A_ = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
A_ = -100
A_ = tf.convert_to_tensor(UpperCAmelCase )
A_ = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
A_ = model(UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
A_ = prepared_for_class.keys() - inputs_dict.keys()
A_ = inspect.signature(model.call ).parameters
A_ = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
A_ = {0: "input_ids"}
for label_key in label_keys:
A_ = signature_names.index(UpperCAmelCase )
A_ = label_key
A_ = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
A_ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
A_ = prepared_for_class[value]
A_ = tuple(UpperCAmelCase )
# Send to model
A_ = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __A ( self : Any ):
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[int] ):
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ = type
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : Any ):
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : int ):
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : Union[str, Any] ):
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : Dict ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __snake_case ( ):
"""simple docstring"""
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase ) if is_vision_available() else None
@slow
def __A ( self : int ):
A_ = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCAmelCase , return_tensors="tf" ).pixel_values
A_ = tf.constant([[1, 2]] )
A_ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
A_ = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
# verify the logits
A_ = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase )
A_ = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ) ) | 351 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files) | 329 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def SCREAMING_SNAKE_CASE_ ( ) -> int:
raise RuntimeError('''CUDA out of memory.''' )
class lowercase_ ( nn.Module ):
"""simple docstring"""
def __init__( self ) ->Any:
super().__init__()
lowerCAmelCase = nn.Linear(3 , 4 )
lowerCAmelCase = nn.BatchNormad(4 )
lowerCAmelCase = nn.Linear(4 , 5 )
def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE ) ->str:
return self.lineara(self.batchnorm(self.lineara(__SCREAMING_SNAKE_CASE ) ) )
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE ):
nonlocal batch_sizes
batch_sizes.append(__SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
lowerCAmelCase = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
nonlocal batch_sizes
batch_sizes.append(__SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowerCAmelCase , lowerCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE ):
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = torch.cuda.memory_allocated()
lowerCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = release_memory(__SCREAMING_SNAKE_CASE )
self.assertEqual(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE )
| 338 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Union[str, Any]:
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = features.copy() if features else default_expected_features
lowerCAmelCase = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase = TextDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
if issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase = text_path
elif issubclass(snake_case__ , snake_case__ ):
lowerCAmelCase = [text_path]
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_text_dataset(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=("train",) ) -> Optional[Any]:
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
lowerCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
lowerCAmelCase = tmp_path / '''cache'''
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = features.copy() if features else default_expected_features
lowerCAmelCase = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase = TextDatasetReader({'''train''': text_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
if split:
lowerCAmelCase = {split: text_path}
else:
lowerCAmelCase = '''train'''
lowerCAmelCase = {'''train''': text_path, '''test''': text_path}
lowerCAmelCase = tmp_path / '''cache'''
lowerCAmelCase = {'''text''': '''string'''}
lowerCAmelCase = TextDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_text_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 338 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE = 'pytorch_model.bin'
@dataclasses.dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = dataclasses.field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} )
lowerCamelCase :Optional[str] = dataclasses.field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , )
@dataclasses.dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} )
lowerCamelCase :str = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} )
lowerCamelCase :Optional[str] = dataclasses.field(
default=__lowerCAmelCase , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
lowerCamelCase :Optional[str] = dataclasses.field(
default=__lowerCAmelCase , metadata={'''help''': '''The name of the task to train on.'''} , )
lowerCamelCase :Optional[List[str]] = dataclasses.field(
default=__lowerCAmelCase , metadata={'''help''': '''The list of labels for the task.'''} )
@dataclasses.dataclass
class a :
"""simple docstring"""
lowerCamelCase :str = dataclasses.field(
metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} )
lowerCamelCase :Optional[str] = dataclasses.field(
default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} )
lowerCamelCase :Optional[str] = dataclasses.field(
default='''no''' , metadata={
'''help''': '''The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'''
} , )
lowerCamelCase :Optional[int] = dataclasses.field(
default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
lowerCamelCase :Optional[float] = dataclasses.field(
default=0.0 , metadata={
'''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.'''
} , )
lowerCamelCase :Optional[bool] = dataclasses.field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , )
lowerCamelCase :Optional[bool] = dataclasses.field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , )
lowerCamelCase :Optional[bool] = dataclasses.field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , )
lowerCamelCase :Optional[float] = dataclasses.field(
default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , )
lowerCamelCase :Optional[int] = dataclasses.field(
default=100 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , )
lowerCamelCase :Optional[int] = dataclasses.field(
default=__lowerCAmelCase , metadata={'''help''': '''Random seed for initialization.'''} , )
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Optional[int] , snake_case__ :Tuple , snake_case__ :List[Any] , snake_case__ :Optional[Any] , snake_case__ :Optional[int]) -> Tuple:
_A = datasets.concatenate_datasets([infer_input, infer_output] , axis=1)
if args.do_filter_by_confidence:
_A = dataset.filter(lambda snake_case__: example["probability"] > args.confidence_threshold)
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_A = int(eval_result * len(snake_case__))
print(snake_case__)
_A = dataset.sort("""probability""" , reverse=snake_case__)
_A = dataset.select(range(snake_case__))
_A = dataset.remove_columns(["""label""", """probability"""])
_A = dataset.rename_column("""prediction""" , """label""")
_A = dataset.map(lambda snake_case__: {"label": idalabel[example["label"]]})
_A = dataset.shuffle(seed=args.seed)
_A = os.path.join(snake_case__ , F'''train_pseudo.{args.data_file_extension}''')
if args.data_file_extension == "csv":
dataset.to_csv(snake_case__ , index=snake_case__)
else:
dataset.to_json(snake_case__)
def snake_case ( snake_case__ :List[Any] , snake_case__ :Union[str, Any] , snake_case__ :List[Any] , snake_case__ :str , **snake_case__ :int) -> Optional[Any]:
_A = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_A = STModelArguments(model_name_or_path=snake_case__)
_A = STDataArguments(train_file=snake_case__ , infer_file=snake_case__)
_A = STTrainingArguments(output_dir=snake_case__)
_A = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(snake_case__).items():
setattr(snake_case__ , snake_case__ , snake_case__)
for key, value in kwargs.items():
if hasattr(snake_case__ , snake_case__):
setattr(snake_case__ , snake_case__ , snake_case__)
# Sanity checks
_A = {}
_A = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_A = args.train_file
_A = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_A = args.eval_file
for key in data_files:
_A = data_files[key].split(""".""")[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
_A = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
logger.info("""Creating the initial data directory for self-training...""")
_A = F'''{args.output_dir}/self-train_iter-{{}}'''.format
_A = data_dir_format(0)
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=snake_case__)
os.makedirs(snake_case__ , exist_ok=snake_case__)
accelerator.wait_for_everyone()
_A = None
_A = None
_A = 0
_A = False
# Show the progress bar
_A = tqdm(range(args.max_selftrain_iterations) , disable=not accelerator.is_local_main_process)
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations)):
_A = data_dir_format(snake_case__)
assert os.path.exists(snake_case__)
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_A = os.path.join(snake_case__ , """stage-1""")
_A = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(snake_case__ , snake_case__):
arguments_dict.update({key: value})
_A = os.path.join(snake_case__ , """best-checkpoint""" , snake_case__)
if os.path.exists(snake_case__):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , snake_case__ , snake_case__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , snake_case__)
finetune(**snake_case__)
accelerator.wait_for_everyone()
assert os.path.exists(snake_case__)
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , snake_case__)
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_A = os.path.join(snake_case__ , """best-checkpoint""")
_A = os.path.join(snake_case__ , """stage-2""")
# Update arguments_dict
_A = model_path
_A = data_files["""train"""]
_A = current_output_dir
_A = os.path.join(snake_case__ , """best-checkpoint""" , snake_case__)
if os.path.exists(snake_case__):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , snake_case__ , snake_case__ , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , snake_case__)
finetune(**snake_case__)
accelerator.wait_for_everyone()
assert os.path.exists(snake_case__)
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , snake_case__)
_A = iteration
_A = data_dir_format(iteration + 1)
_A = AutoConfig.from_pretrained(os.path.join(snake_case__ , """best-checkpoint"""))
_A = config.idalabel
_A = os.path.join(snake_case__ , """eval_results_best-checkpoint.json""")
_A = os.path.join(snake_case__ , """test_results_best-checkpoint.json""")
assert os.path.exists(snake_case__)
with open(snake_case__ , """r""") as f:
_A = float(json.load(snake_case__)[args.eval_metric])
_A = os.path.join(snake_case__ , """infer_output_best-checkpoint.csv""")
assert os.path.exists(snake_case__)
# Loading the dataset from local csv or json files.
_A = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]})["""data"""]
_A = load_dataset("""csv""" , data_files={"""data""": infer_output_file})["""data"""]
if accelerator.is_main_process:
os.makedirs(snake_case__ , exist_ok=snake_case__)
shutil.copy(snake_case__ , os.path.join(snake_case__ , F'''eval_results_iter-{iteration}.json'''))
if os.path.exists(snake_case__):
shutil.copy(snake_case__ , os.path.join(snake_case__ , F'''test_results_iter-{iteration}.json'''))
create_pseudo_labeled_data(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
accelerator.wait_for_everyone()
_A = os.path.join(snake_case__ , F'''train_pseudo.{args.data_file_extension}''')
if args.evaluation_strategy != IntervalStrategy.NO.value:
_A = eval_result
if best_iteration is None:
_A = new_iteration
_A = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_A = new_iteration
_A = new_eval_result
_A = 0
else:
if new_eval_result == best_eval_result:
_A = new_iteration
_A = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_A = True
progress_bar.update(1)
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , snake_case__)
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , snake_case__)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(snake_case__ , F'''eval_results_iter-{iteration}.json''') , os.path.join(snake_case__ , """eval_results_best-iteration.json""") , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1)
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , snake_case__)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(snake_case__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''') , os.path.join(snake_case__ , """eval_results_best-iteration.json""") , )
| 81 | from math import isqrt
def snake_case ( snake_case__ :int) -> list[int]:
_A = [True] * max_number
for i in range(2 , isqrt(max_number - 1) + 1):
if is_prime[i]:
for j in range(i**2 , snake_case__ , snake_case__):
_A = False
return [i for i in range(2 , snake_case__) if is_prime[i]]
def snake_case ( snake_case__ :int = 10**8) -> int:
_A = calculate_prime_numbers(max_number // 2)
_A = 0
_A = 0
_A = len(snake_case__) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 81 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a :Optional[int] = logging.get_logger(__name__)
a :Optional[int] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
a :int = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def _lowercase ( __lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__ : int = {}
with open(__lowerCAmelCase , """r""" ) as file:
for line_number, line in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = line.strip()
if line:
SCREAMING_SNAKE_CASE__ : List[Any] = line.split()
SCREAMING_SNAKE_CASE__ : List[str] = line_number
SCREAMING_SNAKE_CASE__ : int = words[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = value
return result
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
for attribute in key.split(""".""" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
SCREAMING_SNAKE_CASE__ : List[str] = """param"""
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ : str = hf_pointer
for attribute in hf_param_name.split(""".""" ):
SCREAMING_SNAKE_CASE__ : Dict = getattr(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = shape_pointer.shape
# let's reduce dimension
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value[0]
else:
SCREAMING_SNAKE_CASE__ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : Any = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : Any = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : List[str] = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
SCREAMING_SNAKE_CASE__ : List[str] = getattr(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = value
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Tuple = PARAM_MAPPING[full_name.split(""".""" )[-1]]
SCREAMING_SNAKE_CASE__ : int = """param"""
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ : int = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ : List[Any] = """.""".join([key, hf_param_name] )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = key
SCREAMING_SNAKE_CASE__ : int = value if """lm_head""" in full_key else value[0]
a :Optional[Any] = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str = False
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
SCREAMING_SNAKE_CASE__ : List[Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = """weight_g"""
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = """weight_v"""
elif "bias" in name:
SCREAMING_SNAKE_CASE__ : int = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ : Optional[int] = """weight"""
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if hf_dict is not None:
rename_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return is_used
return is_used
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE__ : Dict = load_wavaveca_layer(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] = full_name.split("""conv_layers.""" )[-1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.split(""".""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(items[0] )
SCREAMING_SNAKE_CASE__ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : List[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
SCREAMING_SNAKE_CASE__ : Dict = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=False ) -> int:
if config_path is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = WavaVecaConfig.from_pretrained(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig()
if is_seq_class:
SCREAMING_SNAKE_CASE__ : Optional[Any] = read_txt_into_dict(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = idalabel
SCREAMING_SNAKE_CASE__ : List[str] = WavaVecaForSequenceClassification(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
feature_extractor.save_pretrained(__lowerCAmelCase )
elif is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ : Tuple = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE__ : Dict = target_dict.pad_index
SCREAMING_SNAKE_CASE__ : List[Any] = target_dict.bos_index
SCREAMING_SNAKE_CASE__ : Optional[int] = target_dict.eos_index
SCREAMING_SNAKE_CASE__ : Optional[int] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE__ : Dict = True if config.feat_extract_norm == """layer""" else False
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE__ : str = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = WavaVecaForCTC(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaForPreTraining(__lowerCAmelCase )
if is_finetuned or is_seq_class:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.Namespace(task="""audio_pretraining""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = fairseq.tasks.setup_task(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
a :Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
a :Optional[Any] = parser.parse_args()
a :Any = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 132 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Generator[tuple[str, ...], None, None]:
SCREAMING_SNAKE_CASE__ : List[Any] = iter(__lowerCAmelCase )
while True:
SCREAMING_SNAKE_CASE__ : Optional[int] = tuple(itertools.islice(__lowerCAmelCase , __lowerCAmelCase ) )
if not chunk:
return
yield chunk
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
SCREAMING_SNAKE_CASE__ : Tuple = """"""
if len(__lowerCAmelCase ) < 2:
return dirty
for i in range(len(__lowerCAmelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(__lowerCAmelCase ) & 1:
clean += "X"
return clean
def _lowercase ( __lowerCAmelCase ) -> list[str]:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
SCREAMING_SNAKE_CASE__ : str = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
SCREAMING_SNAKE_CASE__ : Optional[int] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(__lowerCAmelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(__lowerCAmelCase )
return table
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Tuple = generate_table(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_input(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__lowerCAmelCase , 2 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = divmod(table.index(__lowerCAmelCase ) , 5 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = divmod(table.index(__lowerCAmelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : str = generate_table(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(__lowerCAmelCase , 2 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = divmod(table.index(__lowerCAmelCase ) , 5 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = divmod(table.index(__lowerCAmelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 132 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCamelCase : Any ="convnextv2"
def __init__( self : List[str] , a : str=3 , a : int=4 , a : Optional[int]=4 , a : Optional[int]=None , a : Tuple=None , a : int="gelu" , a : Dict=0.02 , a : Any=1e-1_2 , a : str=0.0 , a : List[str]=2_24 , a : Optional[Any]=None , a : Any=None , **a : List[Any] , ):
"""simple docstring"""
super().__init__(**a )
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = num_stages
__lowerCamelCase = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
__lowerCamelCase = [3, 3, 9, 3] if depths is None else depths
__lowerCamelCase = hidden_act
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = drop_path_rate
__lowerCamelCase = image_size
__lowerCamelCase = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__lowerCamelCase , __lowerCamelCase = get_aligned_output_features_output_indices(
out_features=a , out_indices=a , stage_names=self.stage_names )
| 237 | '''simple docstring'''
__UpperCAmelCase ="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __lowerCAmelCase ( ) -> None:
__lowerCamelCase = input('''Enter message: ''' )
__lowerCamelCase = input('''Enter key [alphanumeric]: ''' )
__lowerCamelCase = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
__lowerCamelCase = '''encrypt'''
__lowerCamelCase = encrypt_message(UpperCamelCase__ , UpperCamelCase__ )
elif mode.lower().startswith('''d''' ):
__lowerCamelCase = '''decrypt'''
__lowerCamelCase = decrypt_message(UpperCamelCase__ , UpperCamelCase__ )
print(f"""\n{mode.title()}ed message:""" )
print(UpperCamelCase__ )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
return translate_message(UpperCamelCase__ , UpperCamelCase__ , '''encrypt''' )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
return translate_message(UpperCamelCase__ , UpperCamelCase__ , '''decrypt''' )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = key.upper()
for symbol in message:
__lowerCamelCase = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCamelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCamelCase__ ):
__lowerCamelCase = 0
else:
translated.append(UpperCamelCase__ )
return "".join(UpperCamelCase__ )
if __name__ == "__main__":
main()
| 237 | 1 |
"""simple docstring"""
from __future__ import annotations
class __A :
def __init__( self , a__=None ):
_lowerCAmelCase : Optional[Any] = data
_lowerCAmelCase : Dict = None
def __repr__( self ):
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : int = self
while temp:
string_rep.append(F"{temp.data}" )
_lowerCAmelCase : str = temp.next
return "->".join(a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> List[Any]:
if not elements_list:
raise Exception("""The Elements List is empty""" )
_lowerCAmelCase : Dict = Node(elements_list[0] )
for i in range(1 ,len(_lowerCamelCase ) ):
_lowerCAmelCase : Tuple = Node(elements_list[i] )
_lowerCAmelCase : List[Any] = current.next
return head
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node ) -> None:
if head_node is not None and isinstance(_lowerCamelCase ,_lowerCamelCase ):
print_reverse(head_node.next )
print(head_node.data )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
from doctest import testmod
testmod()
_lowerCAmelCase : Dict = make_linked_list([14, 52, 14, 12, 43] )
print("""Linked List:""" )
print(_lowerCamelCase )
print("""Elements in Reverse:""" )
print_reverse(_lowerCamelCase )
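

# Recursion trace (added for illustration): for the list 1->2->3,
# print_reverse recurses to the tail before printing anything, so the
# output order is 3, 2, 1.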
if __name__ == "__main__":
    main()
| 44 |
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
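

# Worked example (added for illustration): 187 = 11 * 17. With the default
# seed/step, the tortoise and hare collide modulo 11 on the second round,
# so pollard_rho(187) returns the nontrivial factor 11.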
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 225 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    """Processor for Bark: wraps a text tokenizer and optional speaker embeddings."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."""
                )

            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings."""
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
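

# Minimal usage sketch (added for illustration; "suno/bark-small" is a Hub
# checkpoint, so this needs network access to run):
#
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#     inputs["input_ids"].shape  # (1, 256) -- padded to max_length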
| 121 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing: collisions are resolved by linear
    probing and deletions leave a tombstone (``_deleted``) behind."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 121 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 6 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 329 | 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
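
# Known value (added for reference): with the default limit of two million,
# solution() evaluates to 142913828922 (Project Euler problem 10).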
| 371 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        VideoMAEForPreTraining,
        VideoMAEForVideoClassification,
        VideoMAEModel,
    )
    from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from transformers import VideoMAEImageProcessor


class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_video():
    video_path = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_path)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
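

# Shape bookkeeping (added for illustration): with the tester defaults
# (image_size=10, patch_size=2, num_frames=2, tubelet_size=2) there are
# (10 // 2) ** 2 = 25 patches per frame and 2 // 2 = 1 temporal slot, so
# seq_length = 25 and num_masks = int(0.9 * 25) = 22.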
| 158 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate fo training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})
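

# Minimal usage sketch (added for illustration): these dataclasses are meant
# to be consumed by transformers' HfArgumentParser, e.g.
#
#     from transformers import HfArgumentParser
#     parser = HfArgumentParser(TrainingArguments)
#     (train_args,) = parser.parse_args_into_dataclasses()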
@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Random seed used for evaluation."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(
        default=32768, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
| 81 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> str:
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =self.get_tokenizer()
a =self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
a =tokenizer.encode('''sequence builders''' , add_special_tokens=__A )
a =tokenizer.encode('''multi-sequence build''' , add_special_tokens=__A )
a =tokenizer.build_inputs_with_special_tokens(__A )
a =tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def SCREAMING_SNAKE_CASE ( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a =self.rust_tokenizer_class.from_pretrained(__A , **__A )
a =f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
a =tokenizer_r.encode_plus(
__A , return_attention_mask=__A , return_token_type_ids=__A , return_offsets_mapping=__A , add_special_tokens=__A , )
a =tokenizer_r.do_lower_case if hasattr(__A , '''do_lower_case''' ) else False
a =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a =['''的''', '''人''', '''有''']
a =''''''.join(__A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a =True
a =self.tokenizer_class.from_pretrained(__A , **__A )
a =self.rust_tokenizer_class.from_pretrained(__A , **__A )
a =tokenizer_p.encode(__A , add_special_tokens=__A )
a =tokenizer_r.encode(__A , add_special_tokens=__A )
a =tokenizer_r.convert_ids_to_tokens(__A )
a =tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
a =False
a =self.rust_tokenizer_class.from_pretrained(__A , **__A )
a =self.tokenizer_class.from_pretrained(__A , **__A )
a =tokenizer_r.encode(__A , add_special_tokens=__A )
a =tokenizer_p.encode(__A , add_special_tokens=__A )
a =tokenizer_r.convert_ids_to_tokens(__A )
a =tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that only the first Chinese character is not preceded by "##".
a =[
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(__A )
]
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A ) | 81 | 1 |
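# --- Illustrative addendum (not part of the original test file): a standalone sketch of
# the BasicTokenizer options exercised above; the commented outputs mirror the assertions.
from transformers.models.bert.tokenization_bert import BasicTokenizer

print(BasicTokenizer(do_lower_case=True).tokenize("HäLLo!how Are yoU?"))
# ['hallo', '!', 'how', 'are', 'you', '?'] -- lowercasing strips accents by default
print(BasicTokenizer(do_lower_case=True, strip_accents=False).tokenize("H\u00E9llo"))
# ['héllo'] -- accents preserved when strip_accents=False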
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class _snake_case ( nn.Module ):
def __init__( self : int ):
super().__init__()
__lowerCamelCase : Union[str, Any] = nn.Linear(3 , 4 )
__lowerCamelCase : Optional[int] = nn.BatchNormad(4 )
__lowerCamelCase : Any = nn.Linear(4 , 5 )
def lowerCamelCase__ ( self : str , UpperCAmelCase : Union[str, Any] ):
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase ) ) )
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : int = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCAmelCase , model.state_dict() )
__lowerCamelCase : Optional[Any] = os.path.join(UpperCAmelCase , "index.json" )
self.assertTrue(os.path.isfile(UpperCAmelCase ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
__lowerCamelCase : Dict = os.path.join(UpperCAmelCase , F"""{key}.dat""" )
self.assertTrue(os.path.isfile(UpperCAmelCase ) )
# TODO: add tests on the fact weights are properly loaded
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Union[str, Any] = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
__lowerCamelCase : List[str] = torch.randn(2 , 3 , dtype=UpperCAmelCase )
with TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Optional[Any] = offload_weight(UpperCAmelCase , "weight" , UpperCAmelCase , {} )
__lowerCamelCase : Union[str, Any] = os.path.join(UpperCAmelCase , "weight.dat" )
self.assertTrue(os.path.isfile(UpperCAmelCase ) )
self.assertDictEqual(UpperCAmelCase , {"weight": {"shape": [2, 3], "dtype": str(UpperCAmelCase ).split("." )[1]}} )
__lowerCamelCase : Dict = load_offloaded_weight(UpperCAmelCase , index["weight"] )
self.assertTrue(torch.equal(UpperCAmelCase , UpperCAmelCase ) )
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : str = ModelForTest()
__lowerCamelCase : int = model.state_dict()
__lowerCamelCase : Any = {k: v for k, v in state_dict.items() if "linear2" not in k}
__lowerCamelCase : Dict = {k: v for k, v in state_dict.items() if "linear2" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCAmelCase , save_folder=UpperCAmelCase )
# Every key is there with the right value
self.assertEqual(sorted(UpperCAmelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCAmelCase , weight_map[key] ) )
__lowerCamelCase : Union[str, Any] = {k: v for k, v in state_dict.items() if "weight" in k}
__lowerCamelCase : Dict = {k: v for k, v in state_dict.items() if "weight" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Optional[int] = OffloadedWeightsLoader(state_dict=UpperCAmelCase , save_folder=UpperCAmelCase )
# Every key is there with the right value
self.assertEqual(sorted(UpperCAmelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCAmelCase , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCAmelCase , UpperCAmelCase )
# Duplicates are removed
__lowerCamelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCAmelCase , save_folder=UpperCAmelCase )
# Every key is there with the right value
self.assertEqual(sorted(UpperCAmelCase ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCAmelCase , weight_map[key] ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : List[Any] = {"a.1": 0, "a.10": 1, "a.2": 2}
__lowerCamelCase : Dict = extract_submodules_state_dict(UpperCAmelCase , ["a.1", "a.2"] )
self.assertDictEqual(UpperCAmelCase , {"a.1": 0, "a.2": 2} )
__lowerCamelCase : str = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
__lowerCamelCase : Optional[int] = extract_submodules_state_dict(UpperCAmelCase , ["a.1", "a.2"] )
self.assertDictEqual(UpperCAmelCase , {"a.1.a": 0, "a.2.a": 2} ) | 64 | """simple docstring"""
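# --- Illustrative addendum (not part of the original test file): a minimal sketch of the
# offload utilities outside unittest -- write a state dict to disk, then read it back
# lazily through OffloadedWeightsLoader.
from tempfile import TemporaryDirectory

import torch.nn as nn
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

model = nn.Linear(3, 4)
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, model.state_dict())  # writes index.json plus one .dat file per tensor
    weights = OffloadedWeightsLoader(save_folder=tmp_dir)
    print(weights["weight"].shape)  # tensors are memory-mapped from disk on access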
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
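# --- Illustrative addendum (not part of the original file): instantiating the config from
# outside the library; the attribute_map and properties above alias the generic names to
# the DETR-style ones.
from transformers import DetaConfig

config = DetaConfig()
print(config.hidden_size)          # 256, resolved to config.d_model
print(config.num_attention_heads)  # 8, resolved to config.encoder_attention_heads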