# ---------------------------------------------------------------------------
# Deprecation helper in the diffusers style (identifier names reconstructed
# best-effort from an obfuscated dump).

import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version

def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
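
# Illustrative usage (a sketch, not part of the original module; the kwarg
# name and version string below are hypothetical):
#
#   kwargs = {"old_arg": 1}
#   old_arg = deprecate("old_arg", "999.0.0", "Use `new_arg` instead.", take_from=kwargs)
#
# This pops `old_arg` out of `kwargs`, emits a FutureWarning that appends the
# custom message, and returns the popped value.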

# ---------------------------------------------------------------------------
# Nearest-neighbour similarity search (identifier names reconstructed
# best-effort from the obfuscated dump).

from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Straight-line distance between two equally sized vectors.
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        # Track the nearest dataset vector seen so far for this query value.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
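
# Worked example (hypothetical values): for each query vector, the nearest
# dataset vector and its distance are returned.
#
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#   value_array = np.array([[0.0, 1.0]])
#   similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 1.0]]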

# ---------------------------------------------------------------------------
# UPerNet configuration (transformers). Identifier names reconstructed
# best-effort from the obfuscated dump.

import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
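
# Usage sketch (assumes the reconstructed class name above): with no
# `backbone_config`, a default four-stage ResNet backbone config is created.
#
#   config = UperNetConfig()
#   config.backbone_config.model_type  # -> "resnet"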

# ---------------------------------------------------------------------------
# Shap-E pipeline tests (diffusers). Identifier names and a few obfuscated
# literals below are reconstructed best-effort from the dump.

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False  # attribute name reconstructed; only `False` survives in the dump

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,  # boolean values reconstructed; they were obfuscated in the dump
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)

# ---------------------------------------------------------------------------
# Lazy import structure for the GIT model (transformers). Identifier names
# reconstructed best-effort from the obfuscated dump.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

# ---------------------------------------------------------------------------
# Checkpoint-verification script for diffusers UNet2D models. Identifier
# names are reconstructed best-effort from the obfuscated dump; the per-model
# result keys below follow diffusers' `generate_logits.py` script, but the
# dump dropped them, so treat the exact key/tensor pairing as an assumption.

import random

import torch
from huggingface_hub import HfApi

from diffusers import UNet2DModel


api = HfApi()
results = {}
# fmt: off
results["google_ddpm_cifar10_32"] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
results["google_ddpm_ema_bedroom_256"] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
results["CompVis_ldm_celebahq_256"] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
results["google_ncsnpp_ffhq_1024"] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
results["google_ncsnpp_bedroom_256"] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
results["google_ncsnpp_celebahq_256"] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
results["google_ncsnpp_church_256"] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
results["google_ncsnpp_ffhq_256"] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
results["google_ddpm_cat_256"] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
results["google_ddpm_celebahq_256"] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
results["google_ddpm_ema_celebahq_256"] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
results["google_ddpm_church_256"] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
results["google_ddpm_bedroom_256"] = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
results["google_ddpm_ema_church_256"] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
results["google_ddpm_ema_cat_256"] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        # Map e.g. "google/ddpm-cifar10-32" -> "google_ddpm_cifar10_32" to index `results`.
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")

# ---------------------------------------------------------------------------
# Carrier-concentration helper in the style of TheAlgorithms (identifier
# names reconstructed best-effort from the obfuscated dump).

from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electrical_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
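
# Worked example: exactly one of the three quantities must be 0 (unknown).
# With conductivity unknown, sigma = n * e * mu is returned:
#
#   electrical_conductivity(0, 1e19, 100)
#   # -> ("conductivity", 100 * 1e19 * 1.6021e-19) == ("conductivity", 160.21)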

# ---------------------------------------------------------------------------
# Keyboard-input dispatch helpers in the accelerate style (identifier names
# reconstructed best-effort from the obfuscated dump).

from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    # Attach a single key code to the decorated function.
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    # Attach several key codes to the decorated function.
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        # Read one character and dispatch to the registered handler, if any.
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    # Rebuild `cls` through the KeyHandler metaclass.
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
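
# Usage sketch (hypothetical class, not from the original module): `register`
# rebuilds a class through the KeyHandler metaclass so that `handle_input`
# dispatches key presses to methods marked with `mark`/`mark_multiple`.
#
#   @register
#   class Menu:
#       @mark("q")
#       def quit(cls):
#           return "quit"
#
#   # Menu.handle_input() returns "quit" when the user presses "q".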

# ---------------------------------------------------------------------------
# Giphy search helper (identifier names reconstructed best-effort from the
# obfuscated dump).

import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    # Return the URLs of GIFs matching the query via the Giphy search API.
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))

# ---------------------------------------------------------------------------
# Text-generation pipeline tests (transformers). Identifier names and a few
# obfuscated literals are reconstructed best-effort from the dump.

import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def test_small_model_pt(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )

        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(list)},
                {"generated_token_ids": ANY(list)},
            ],
        )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
                [
                    {"generated_token_ids": ANY(list)},
                    {"generated_token_ids": ANY(list)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )

        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )

    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus, which will always append EOS, so it will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work when max_new_tokens pushes past the model's maximum length
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)

# ---------------------------------------------------------------------------
# Postfix (reverse Polish) expression evaluator (identifier names
# reconstructed best-effort from the obfuscated dump).

from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division that truncates toward zero for mixed signs.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
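
# Worked example: "(2 + 1) * 3" in postfix notation is ["2", "1", "+", "3", "*"]:
#
#   evaluate_postfix(["2", "1", "+", "3", "*"])  # -> 9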

# ---------------------------------------------------------------------------
# Sum-square difference (Project Euler style): difference between the square
# of the sum and the sum of the squares of the first n natural numbers.


def solution(n: int = 100) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
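
# Sanity check: for n = 3, (1 + 2 + 3)**2 = 36 and 1**2 + 2**2 + 3**2 = 14,
# so solution(3) == 22.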

# ---------------------------------------------------------------------------
# Environment-dump script (HuggingFace CI utility).

# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowerCAmelCase = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)

# ---------------------------------------------------------------------------
# DPT-style image processor (transformers). Identifier names reconstructed
# best-effort from the obfuscated dump.

import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
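
# Usage sketch (hypothetical, assuming the reconstructed class name above):
#
#   processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   batch = processor(images=image, return_tensors="pt")  # __call__ wraps preprocess()
#   batch["pixel_values"].shape  # -> (1, 3, 384, 384) for a single RGB image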

# ---------------------------------------------------------------------------
# Shared test utilities in the style of the `datasets` test suite (identifier
# names reconstructed best-effort from the obfuscated dump).

import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
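
# Example: under pytest-xdist, worker "gw3" yields pytest_xdist_worker_id() == 3,
# so get_torch_dist_unique_port() == 29500 + 3 == 29503.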

# ---------------------------------------------------------------------------
# Copy-consistency tests for diffusers' `check_copies` utility (identifier
# names reconstructed best-effort from the obfuscated dump).

import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 10 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowerCAmelCase_ :
UpperCAmelCase = BlenderbotConfig
UpperCAmelCase = {}
UpperCAmelCase = "gelu"
def __init__( self : List[str] , _A : List[str] , _A : Union[str, Any]=13 , _A : List[Any]=7 , _A : Any=True , _A : Tuple=False , _A : str=99 , _A : int=32 , _A : Optional[Any]=2 , _A : Optional[int]=4 , _A : Optional[Any]=37 , _A : Any=0.1 , _A : int=0.1 , _A : Union[str, Any]=20 , _A : List[str]=2 , _A : Tuple=1 , _A : Any=0 , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCamelCase = prepare_blenderbot_inputs_dict(_A , _A , _A )
return config, inputs_dict
def UpperCamelCase_ ( self : Union[str, Any] , _A : Union[str, Any] , _A : Union[str, Any] ):
_UpperCamelCase = TFBlenderbotModel(config=_A ).get_decoder()
_UpperCamelCase = inputs_dict['''input_ids''']
_UpperCamelCase = input_ids[:1, :]
_UpperCamelCase = inputs_dict['''attention_mask'''][:1, :]
_UpperCamelCase = inputs_dict['''head_mask''']
_UpperCamelCase = 1
# first forward pass
_UpperCamelCase = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
_UpperCamelCase , _UpperCamelCase = outputs.to_tuple()
# create hypothetical next tokens and extend next_input_ids with them
_UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append the new tokens to input_ids and attention_mask
_UpperCamelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase = model(_A , attention_mask=_A )[0]
_UpperCamelCase = model(_A , attention_mask=_A , past_key_values=_A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_A , _A , rtol=1e-3 )
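# The assertion above encodes the cache contract: decoding the appended
# tokens with reused `past_key_values` must match, within rtol=1e-3 on a
# random logit slice, a full forward pass over the concatenated input ids.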
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , ):
if attention_mask is None:
_UpperCamelCase = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
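# Framework-neutral sketch of the default-mask rule used above (attend to
# every token that is not `pad_token_id`); pad_token_id=0 is an assumed
# value for this illustration only.
import numpy as np
_ids = np.array([[5, 7, 0, 0], [3, 0, 0, 0]])
_mask = (_ids != 0).astype(np.int8)  # 1 where a real token, 0 where padding
assert _mask.tolist() == [[1, 1, 0, 0], [1, 0, 0, 0]]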
@require_tf
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
UpperCAmelCase = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase = (
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = TFBlenderbotModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_tokenizers
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = ["My friends are cool but they eat too many carbs."]
UpperCAmelCase = "facebook/blenderbot-400M-distill"
@cached_property
def UpperCamelCase_ ( self : List[str] ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.tokenizer(self.src_text , return_tensors='''tf''' )
_UpperCamelCase = self.model.generate(
model_inputs.input_ids , )
_UpperCamelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_A )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 10 | import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
# split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
_UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
_UpperCamelCase = self.processor.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.processor.pad(
labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 so the padded positions are ignored by the loss
_UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_UpperCamelCase = labels
return batch
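# Minimal sketch of the -100 convention applied in `__call__` above: padded
# label positions are replaced with -100, the value that cross-entropy-style
# losses ignore by default and that the CTC path filters out via
# `labels >= 0` (see the trainer below). Values here are illustrative only.
import torch
_labels = torch.tensor([[12, 7, 0, 0]])
_attention_mask = torch.tensor([[1, 1, 0, 0]])
_masked = _labels.masked_fill(_attention_mask.ne(1), -100)
assert _masked.tolist() == [[12, 7, -100, -100]]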
class lowerCAmelCase_ ( __lowercase ):
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
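# Standalone sketch of the gradient-accumulation scaling performed above:
# dividing each micro-batch loss by `gradient_accumulation_steps` makes the
# accumulated gradient equal one large-batch step. Values are arbitrary.
import torch
_w = torch.tensor(1.0, requires_grad=True)
_accum_steps = 4
for _ in range(_accum_steps):
    _micro_loss = (2.0 * _w) / _accum_steps  # pre-scaled micro-batch loss
    _micro_loss.backward()  # gradients accumulate across the micro-batches
assert torch.isclose(_w.grad, torch.tensor(2.0))  # same grad as one 2.0*w step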
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
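# Note: the assignments around the `del` above have lost their targets in
# this listing; in the upstream run_common_voice script they read
# `vocab_dict["|"] = vocab_dict[" "]`, `vocab_dict["[UNK]"] = len(vocab_dict)`
# and `vocab_dict["[PAD]"] = len(vocab_dict)`: the space character becomes
# the CTC word delimiter "|" and the special tokens take the next free ids.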
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
| 10 | 1 |
def _snake_case ( __snake_case , __snake_case , __snake_case ):
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(__snake_case ) )
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
# Base Case
if index == len(__snake_case ):
return True
# Recursive Step
for i in range(__snake_case ):
if valid_coloring(graph[index] , __snake_case , __snake_case ):
# Color current vertex
_UpperCamelCase = i
# Validate coloring
if util_color(__snake_case , __snake_case , __snake_case , index + 1 ):
return True
# Backtrack
_UpperCamelCase = -1
return False
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = [-1] * len(__snake_case )
if util_color(__snake_case , __snake_case , __snake_case , 0 ):
return colored_vertices
return []
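# Usage sketch, assuming the three functions above under their upstream
# names `valid_coloring`, `util_color` and `color` (the entry point takes an
# adjacency matrix and a color budget). On a 4-cycle two colors suffice:
#
#     cycle = [[0, 1, 0, 1],
#              [1, 0, 1, 0],
#              [0, 1, 0, 1],
#              [1, 0, 1, 0]]
#     color(cycle, 2)   # -> [0, 1, 0, 1]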
| 10 | import math
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : int=0 ): # a graph with Node 0,1,...,N-1
_UpperCamelCase = n
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # adjacency matrix for weight
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase_ ( self : Dict , _A : str , _A : List[str] , _A : Optional[Any] ):
_UpperCamelCase = w
def UpperCamelCase_ ( self : Optional[int] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] , _A : Optional[int] ):
return self.dp[u][v]
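# Invariant of the triple loop above (Floyd-Warshall): after processing
# intermediate vertex k, dp[i][j] holds the shortest i->j distance that uses
# only intermediates {0..k}; total cost is O(n^3) time and O(n^2) memory.
# Note the `add_edge` body has lost its assignment target in this listing;
# upstream it is `self.dp[u][v] = w`.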
if __name__ == "__main__":
_lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 10 | 1 |
_lowerCAmelCase = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 10 | import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(
default=[], metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
}, )
UpperCAmelCase = list_field(
default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase = list_field(
default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Benchmark training of model"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Trace memory line by line"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to print environment information"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
}, )
UpperCAmelCase = field(
default=F"""inference_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv."}, )
UpperCAmelCase = field(
default=F"""inference_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv."}, )
UpperCAmelCase = field(
default=F"""train_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
UpperCAmelCase = field(
default=F"""train_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
UpperCAmelCase = field(
default=F"""env_info_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving environment information."}, )
UpperCAmelCase = field(
default=F"""log_{round(time() )}.csv""", metadata={"help": "Log filename used if print statements are saved in log."}, )
UpperCAmelCase = field(default=3, metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
}, )
def UpperCamelCase_ ( self : Union[str, Any] ):
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _A , )
def UpperCamelCase_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCamelCase_ ( self : Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
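# These fields are meant to be consumed by transformers.HfArgumentParser,
# which turns every dataclass field into a CLI flag. Illustrative usage only
# (the upstream class name is `BenchmarkArguments`; here it is mangled):
#
#     parser = HfArgumentParser(BenchmarkArguments)
#     (benchmark_args,) = parser.parse_args_into_dataclasses()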
| 10 | 1 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def _snake_case ( ):
_UpperCamelCase = [randint(-1000 , 1000 ) for i in range(10 )]
_UpperCamelCase = randint(-5000 , 5000 )
return (arr, r)
_lowerCAmelCase = make_dataset()
def _snake_case ( __snake_case , __snake_case ):
for triplet in permutations(__snake_case , 3 ):
if sum(__snake_case ) == target:
return tuple(sorted(__snake_case ) )
return (0, 0, 0)
def _snake_case ( __snake_case , __snake_case ):
arr.sort()
_UpperCamelCase = len(__snake_case )
for i in range(n - 1 ):
_UpperCamelCase , _UpperCamelCase = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
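# Complexity note: the first solver enumerates every ordered triple via
# `permutations(arr, 3)`, i.e. n*(n-1)*(n-2) candidates (O(n^3)); the
# two-pointer version above sorts once and scans, O(n log n + n^2).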
def _snake_case ( ):
_UpperCamelCase = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
_UpperCamelCase = '''
triplet_sum1(*dataset)
'''
_UpperCamelCase = '''
triplet_sum2(*dataset)
'''
_UpperCamelCase = repeat(setup=__snake_case , stmt=__snake_case , repeat=5 , number=10000 )
_UpperCamelCase = repeat(setup=__snake_case , stmt=__snake_case , repeat=5 , number=10000 )
return (min(__snake_case ), min(__snake_case ))
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowerCAmelCase = solution_times()
print(f'The time for naive implementation is {times[0]}.')
print(f'The time for optimized implementation is {times[1]}.')
| 10 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *__snake_case , __snake_case = None , __snake_case=True , __snake_case=2 ):
from .. import __version__
_UpperCamelCase = take_from
_UpperCamelCase = ()
if not isinstance(args[0] , __snake_case ):
_UpperCamelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__snake_case ).base_version ) >= version.parse(__snake_case ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
_UpperCamelCase = None
if isinstance(__snake_case , __snake_case ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__snake_case ),)
_UpperCamelCase = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__snake_case , __snake_case ):
values += (getattr(__snake_case , __snake_case ),)
_UpperCamelCase = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_UpperCamelCase = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_UpperCamelCase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __snake_case , stacklevel=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0:
_UpperCamelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCamelCase = call_frame.filename
_UpperCamelCase = call_frame.lineno
_UpperCamelCase = call_frame.function
_UpperCamelCase , _UpperCamelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__snake_case ) == 0:
return
elif len(__snake_case ) == 1:
return values[0]
return values
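# Usage sketch (the upstream name of this helper is `deprecate`; the names
# and version strings below are illustrative only): deprecating a keyword
# argument pulled out of **kwargs.
#
#     def resize(image, size=None, **kwargs):
#         legacy = deprecate("legacy_size", "2.0.0", "Use `size` instead.",
#                            take_from=kwargs)
#         size = legacy if legacy is not None else size
#         ...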
| 10 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case ):
if isinstance(__snake_case , np.ndarray ):
return list(tensor.shape )
_UpperCamelCase = tf.shape(__snake_case )
if tensor.shape == tf.TensorShape(__snake_case ):
return dynamic
_UpperCamelCase = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__snake_case )]
def _snake_case ( __snake_case , __snake_case = None , __snake_case = None ):
return tf.nn.softmax(logits=logits + 1E-9 , axis=__snake_case , name=__snake_case )
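# The uniform +1e-9 shift leaves the result mathematically unchanged
# (softmax(x + c) == softmax(x) for any constant c added across the axis)
# while steering TF away from a problematic XLA/CPU softmax kernel path,
# hence the upstream name `stable_softmax`.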
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=1E-5 , __snake_case=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__snake_case , __snake_case ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
_UpperCamelCase , _UpperCamelCase = tf.nn.moments(__snake_case , axes=[axis] , keepdims=__snake_case )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
_UpperCamelCase = [1] * inputs.shape.rank
_UpperCamelCase = shape_list(__snake_case )[axis]
_UpperCamelCase = tf.reshape(__snake_case , __snake_case )
_UpperCamelCase = tf.reshape(__snake_case , __snake_case )
# Compute layer normalization using the batch_normalization
# function.
_UpperCamelCase = tf.nn.batch_normalization(
__snake_case , __snake_case , __snake_case , offset=__snake_case , scale=__snake_case , variance_epsilon=__snake_case , )
return outputs
def _snake_case ( __snake_case , __snake_case=0 , __snake_case=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
_UpperCamelCase = tf.shape(__snake_case )
_UpperCamelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
_UpperCamelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(__snake_case , __snake_case )
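# Shape-level sketch of the flatten above (mirrors torch.flatten semantics):
# collapsing dims [1, 2] of a (2, 3, 4, 5) tensor yields (2, 12, 5).
import numpy as np
_x = np.zeros((2, 3, 4, 5))
_flat = _x.reshape(_x.shape[:1] + (-1,) + _x.shape[3:])
assert _flat.shape == (2, 12, 5)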
def _snake_case ( __snake_case ):
if not isinstance(__snake_case , tf.Tensor ):
_UpperCamelCase = tf.convert_to_tensor(__snake_case ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_UpperCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_UpperCamelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_UpperCamelCase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
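# The arithmetic above turns a {1: attend, 0: masked} mask into an additive
# bias: (1 - mask) * dtype.min maps attended positions to 0 and masked
# positions to the most negative representable value, which softmax then
# sends to ~0 probability.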
def _snake_case ( __snake_case , __snake_case , __snake_case = "input_ids" ):
tf.debugging.assert_less(
__snake_case , tf.cast(__snake_case , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(__snake_case )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
_UpperCamelCase = [x for x in data if len(__snake_case ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
_UpperCamelCase = np.asarray(__snake_case )
_UpperCamelCase = 1
_UpperCamelCase = np.array_split(__snake_case , __snake_case )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
_UpperCamelCase = np.array_split(__snake_case , __snake_case )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__snake_case ):
_UpperCamelCase = chunk_data
else:
_UpperCamelCase = data
def _snake_case ( __snake_case , __snake_case ):
if name in group.attrs:
_UpperCamelCase = [n.decode('''utf8''' ) if hasattr(__snake_case , '''decode''' ) else n for n in group.attrs[name]]
else:
_UpperCamelCase = []
_UpperCamelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(__snake_case , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def _snake_case ( __snake_case ):
def _expand_single_ad_tensor(__snake_case ):
if isinstance(__snake_case , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__snake_case , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , __snake_case )
| 10 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case , __snake_case ):
return (preds == labels).mean()
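# e.g. preds = np.array([0, 2, 1]) and labels = np.array([0, 1, 1]) give
# (preds == labels).mean() == 2/3: accuracy here is just averaged
# elementwise agreement.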
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(__snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__snake_case ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__snake_case , p.label_ids )}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__snake_case )
return results
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 10 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=5_0265 , _A : Optional[Any]=1024 , _A : Optional[Any]=12 , _A : Any=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=512 , _A : Dict=0.1 , _A : List[str]=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=2 , _A : Any=0.02 , _A : List[str]=0.0 , _A : List[str]=True , _A : str=False , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : int=0 , _A : Any=2 , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = activation_function
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_learned_position_embeddings
_UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
| 10 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : int , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ):
_UpperCamelCase = dataset
_UpperCamelCase = process
_UpperCamelCase = params
def __len__( self : Optional[Any] ):
return len(self.dataset )
def __getitem__( self : Any , _A : Union[str, Any] ):
_UpperCamelCase = self.dataset[i]
_UpperCamelCase = self.process(_A , **self.params )
return processed
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[str] , _A : Union[str, Any] , _A : List[Any] , _A : Optional[int] , _A : Any=None ):
_UpperCamelCase = loader
_UpperCamelCase = infer
_UpperCamelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCamelCase = None
_UpperCamelCase = loader_batch_size
# Internal bookkeeping
_UpperCamelCase = None
_UpperCamelCase = None
def __len__( self : str ):
return len(self.loader )
def __iter__( self : str ):
_UpperCamelCase = iter(self.loader )
return self
def UpperCamelCase_ ( self : List[str] ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_UpperCamelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCamelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_A , _A ):
# Convert ModelOutput to tuple first
_UpperCamelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_A , _A ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCamelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCamelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# like batch_size=1
_UpperCamelCase = self._loader_batch_data.__class__(_A )
self._loader_batch_index += 1
return result
def UpperCamelCase_ ( self : Dict ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCamelCase = next(self.iterator )
_UpperCamelCase = self.infer(_A , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_A , torch.Tensor ):
_UpperCamelCase = processed
else:
_UpperCamelCase = list(processed.keys() )[0]
_UpperCamelCase = processed[key]
if isinstance(_A , _A ):
_UpperCamelCase = len(_A )
else:
_UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCamelCase = processed
_UpperCamelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
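# Standalone sketch of the loader-batch unrolling implemented above: a
# DataLoader yields batched tensors, and the iterator re-slices them into
# single-example (batch_size == 1) items so downstream code sees one
# example at a time. Values are illustrative only.
import torch
_batch = {"logits": torch.arange(6).reshape(3, 2)}
_items = [{k: v[i].unsqueeze(0) for k, v in _batch.items()} for i in range(3)]
assert _items[1]["logits"].tolist() == [[2, 3]]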
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : str , _A : Dict , _A : List[str] , _A : str , _A : int=None ):
super().__init__(_A , _A , _A )
def __iter__( self : Optional[int] ):
_UpperCamelCase = iter(self.loader )
_UpperCamelCase = None
return self
def UpperCamelCase_ ( self : Optional[int] ):
if self.subiterator is None:
_UpperCamelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_UpperCamelCase = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been fully iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_UpperCamelCase = self.infer(next(self.iterator ) , **self.params )
_UpperCamelCase = next(self.subiterator )
return processed
class lowerCAmelCase_ ( __lowercase ):
def __iter__( self : Optional[Any] ):
_UpperCamelCase = iter(self.loader )
return self
def UpperCamelCase_ ( self : Union[str, Any] ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item, which is the presence of `is_last`.
# That is because everything is flattened by `PipelineChunkIterator`, so we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` item and then passes the accumulated group on to the caller.
_UpperCamelCase = False
_UpperCamelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase = self.loader_batch_item()
_UpperCamelCase = item.pop('''is_last''' )
accumulator.append(_A )
if is_last:
return accumulator
while not is_last:
_UpperCamelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_A , torch.Tensor ):
_UpperCamelCase = processed
else:
_UpperCamelCase = list(processed.keys() )[0]
_UpperCamelCase = processed[key]
if isinstance(_A , _A ):
_UpperCamelCase = len(_A )
else:
_UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase = observed_batch_size
_UpperCamelCase = processed
_UpperCamelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase = self.loader_batch_item()
_UpperCamelCase = item.pop('''is_last''' )
accumulator.append(_A )
if is_last:
return accumulator
else:
_UpperCamelCase = processed
_UpperCamelCase = item.pop('''is_last''' )
accumulator.append(_A )
return accumulator
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[str] , _A : Dataset , _A : str ):
_UpperCamelCase = dataset
_UpperCamelCase = key
def __len__( self : List[str] ):
return len(self.dataset )
def __getitem__( self : Tuple , _A : Union[str, Any] ):
return self.dataset[i][self.key]
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , _A : Dataset , _A : str , _A : str ):
_UpperCamelCase = dataset
_UpperCamelCase = keya
_UpperCamelCase = keya
def __len__( self : List[str] ):
return len(self.dataset )
def __getitem__( self : Optional[int] , _A : Any ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 10 | import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[int]=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
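# A hedged sketch of the torchscript round trip exercised in the GPU test
# above, using a tiny stand-in module so it runs without Flaubert weights.
# `TinyEncoder` and the tensor shapes are illustrative assumptions.
import os
import tempfile

import torch
from torch import nn


class TinyEncoder(nn.Module):
    def __init__(self, vocab_size=100, hidden=16):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden)

    def forward(self, input_ids, attention_mask):
        # zero out padded positions, then mean-pool over the sequence
        hidden = self.embed(input_ids) * attention_mask.unsqueeze(-1)
        return hidden.mean(dim=1)


if __name__ == "__main__":
    model = TinyEncoder().eval()
    input_ids = torch.randint(0, 100, (2, 7))
    attention_mask = torch.ones(2, 7)
    traced = torch.jit.trace(model, (input_ids, attention_mask))
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "traced_model.pt")
        torch.jit.save(traced, path)
        loaded = torch.jit.load(path, map_location="cpu")
    assert loaded(input_ids, attention_mask).shape == (2, 16)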
| 10 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def UpperCamelCase_ ( self : Dict ):
super().setUp()
_UpperCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_UpperCamelCase = dict(zip(_A , range(len(_A ) ) ) )
_UpperCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_UpperCamelCase = {'''unk_token''': '''<unk>'''}
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_A ) )
def UpperCamelCase_ ( self : List[Any] , **_A : Dict ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : Any , **_A : Any ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : Tuple , _A : str ):
return "lower newer", "lower newer"
@cached_property
def UpperCamelCase_ ( self : str ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def UpperCamelCase_ ( self : Any ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_UpperCamelCase = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCamelCase = tokenizer(_A , max_length=len(_A ) , padding=_A , return_tensors='''pt''' )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(_A , _A )
@require_torch
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCamelCase = tokenizer(_A , padding=_A , return_tensors='''pt''' )
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''labels''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
@require_torch
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCamelCase = tokenizer(text_target=_A , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def UpperCamelCase_ ( self : List[Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCamelCase = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=_A , truncation=_A , return_tensors='''pt''' )
self.assertIsInstance(_A , _A )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = ['''A long paragraph for summarization.''']
_UpperCamelCase = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCamelCase = tokenizer(_A , return_tensors='''pt''' )
_UpperCamelCase = tokenizer(text_target=_A , return_tensors='''pt''' )
_UpperCamelCase = inputs['''input_ids''']
_UpperCamelCase = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCamelCase_ ( self : str ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCamelCase = ['''Summary of the text.''', '''Another summary.''']
_UpperCamelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_UpperCamelCase = tokenizer(_A , padding=_A )
_UpperCamelCase = [[0] * len(x ) for x in encoded_output['''input_ids''']]
_UpperCamelCase = tokenizer.pad(_A )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , _A )
def UpperCamelCase_ ( self : Any ):
pass
def UpperCamelCase_ ( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(_A , **_A )
_UpperCamelCase = self.tokenizer_class.from_pretrained(_A , **_A )
_UpperCamelCase = '''A, <mask> AllenNLP sentence.'''
_UpperCamelCase = tokenizer_r.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
_UpperCamelCase = tokenizer_p.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 10 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
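# A hedged numpy sketch of the mask built in `prepare_config_and_inputs`
# above: each row attends to a random-length prefix (ones) and masks the
# rest (zeros). `prefix_attention_mask` and the shapes are illustrative.
import numpy as np


def prefix_attention_mask(batch_size, seq_length, seed=None):
    rng = np.random.default_rng(seed)
    mask = np.zeros((batch_size, seq_length), dtype=np.int64)
    starts = rng.integers(1, seq_length - 1, size=(batch_size,))
    for row, start in enumerate(starts):
        mask[row, :start] = 1  # visible prefix
        mask[row, start:] = 0  # masked-out tail
    return mask


if __name__ == "__main__":
    m = prefix_attention_mask(4, 12, seed=0)
    # every row starts with at least one visible token and ends masked
    assert (m[:, 0] == 1).all() and (m[:, -1] == 0).all()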
| 10 | 1 |
import random
def _snake_case ( __snake_case , __snake_case , __snake_case = False ):
_UpperCamelCase = {i: [] for i in range(__snake_case )}
# if the probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(__snake_case )
# if the probability is less than or equal to 0, return the graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j), add an edge from i to j
# if the randomly generated number is less than the edge probability
for i in range(__snake_case ):
for j in range(i + 1 , __snake_case ):
if random.random() < probability:
graph[i].append(__snake_case )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(__snake_case )
return graph
def _snake_case ( __snake_case ):
return {
i: [j for j in range(__snake_case ) if i != j] for i in range(__snake_case )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
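# A usage sketch for the generator above, written against de-obfuscated names
# (`random_graph` is an assumption standing in for the function): with edge
# probability p, an undirected graph on n vertices averages p * n * (n - 1) / 2
# edges.
import random


def random_graph(vertices_number, probability, directed=False):
    graph = {i: [] for i in range(vertices_number)}
    if probability >= 1:
        return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}
    if probability <= 0:
        return graph
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    graph[j].append(i)
    return graph


if __name__ == "__main__":
    random.seed(1)
    graph = random_graph(100, 0.1)
    edges = sum(len(neighbors) for neighbors in graph.values()) // 2
    print(f"observed {edges} edges, expected about {0.1 * 100 * 99 / 2:.0f}")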
| 10 | from __future__ import annotations
_lowerCAmelCase = [True] * 1_000_001
_lowerCAmelCase = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
_lowerCAmelCase = False
i += 1
def _snake_case ( __snake_case ):
return seive[n]
def _snake_case ( __snake_case ):
return any(digit in '''02468''' for digit in str(__snake_case ) )
def _snake_case ( __snake_case = 1000000 ):
_UpperCamelCase = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__snake_case ) and not contains_an_even_digit(__snake_case ):
_UpperCamelCase = str(__snake_case )
_UpperCamelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(__snake_case ) )]
if all(is_prime(__snake_case ) for i in list_nums ):
result.append(__snake_case )
return result
def _snake_case ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
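# A worked example of the rotation check performed above: 197 is a circular
# prime because every digit rotation (197, 971, 719) is prime, while 19 fails
# because its rotation 91 = 7 * 13 is composite. The trial-division helper is
# a self-contained re-statement, not the sieve-backed version.
def rotations(n):
    s = str(n)
    return [int(s[i:] + s[:i]) for i in range(len(s))]


def is_prime_trial(n):
    if n < 2:
        return False
    d = 2
    while d * d <= n:
        if n % d == 0:
            return False
        d += 1
    return True


if __name__ == "__main__":
    assert rotations(197) == [197, 971, 719]
    assert all(is_prime_trial(r) for r in rotations(197))
    assert not all(is_prime_trial(r) for r in rotations(19))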
| 10 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowerCAmelCase_ ( __lowercase ):
def __get__( self : Union[str, Any] , _A : Any , _A : Union[str, Any]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
_UpperCamelCase = '''__cached_''' + self.fget.__name__
_UpperCamelCase = getattr(_A , _A , _A )
if cached is None:
_UpperCamelCase = self.fget(_A )
setattr(_A , _A , _A )
return cached
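# A hedged usage sketch of the caching descriptor above (upstream it subclasses
# `property` and is exposed as `cached_property`; the re-statement below keeps
# this snippet self-contained). The getter runs once per instance and later
# reads return the stored `__cached_<name>` attribute.
class cached_property(property):
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


class Expensive:
    calls = 0

    @cached_property
    def value(self):
        Expensive.calls += 1
        return 42


if __name__ == "__main__":
    obj = Expensive()
    assert obj.value == 42 and obj.value == 42
    assert Expensive.calls == 1  # the getter ran only once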
def _snake_case ( __snake_case ):
_UpperCamelCase = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def _snake_case ( __snake_case ):
if is_torch_fx_proxy(__snake_case ):
return True
if is_torch_available():
import torch
if isinstance(__snake_case , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__snake_case , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__snake_case , (jnp.ndarray, Tracer) ):
return True
return isinstance(__snake_case , np.ndarray )
def _snake_case ( __snake_case ):
return isinstance(__snake_case , np.ndarray )
def _snake_case ( __snake_case ):
return _is_numpy(__snake_case )
def _snake_case ( __snake_case ):
import torch
return isinstance(__snake_case , torch.Tensor )
def _snake_case ( __snake_case ):
return False if not is_torch_available() else _is_torch(__snake_case )
def _snake_case ( __snake_case ):
import torch
return isinstance(__snake_case , torch.device )
def _snake_case ( __snake_case ):
return False if not is_torch_available() else _is_torch_device(__snake_case )
def _snake_case ( __snake_case ):
import torch
if isinstance(__snake_case , __snake_case ):
if hasattr(__snake_case , __snake_case ):
_UpperCamelCase = getattr(__snake_case , __snake_case )
else:
return False
return isinstance(__snake_case , torch.dtype )
def _snake_case ( __snake_case ):
return False if not is_torch_available() else _is_torch_dtype(__snake_case )
def _snake_case ( __snake_case ):
import tensorflow as tf
return isinstance(__snake_case , tf.Tensor )
def _snake_case ( __snake_case ):
return False if not is_tf_available() else _is_tensorflow(__snake_case )
def _snake_case ( __snake_case ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__snake_case , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(__snake_case )
return type(__snake_case ) == tf.Tensor
def _snake_case ( __snake_case ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(__snake_case )
def _snake_case ( __snake_case ):
import jax.numpy as jnp # noqa: F811
return isinstance(__snake_case , jnp.ndarray )
def _snake_case ( __snake_case ):
return False if not is_flax_available() else _is_jax(__snake_case )
def _snake_case ( __snake_case ):
if isinstance(__snake_case , (dict, UserDict) ):
return {k: to_py_obj(__snake_case ) for k, v in obj.items()}
elif isinstance(__snake_case , (list, tuple) ):
return [to_py_obj(__snake_case ) for o in obj]
elif is_tf_tensor(__snake_case ):
return obj.numpy().tolist()
elif is_torch_tensor(__snake_case ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(__snake_case ):
return np.asarray(__snake_case ).tolist()
elif isinstance(__snake_case , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _snake_case ( __snake_case ):
if isinstance(__snake_case , (dict, UserDict) ):
return {k: to_numpy(__snake_case ) for k, v in obj.items()}
elif isinstance(__snake_case , (list, tuple) ):
return np.array(__snake_case )
elif is_tf_tensor(__snake_case ):
return obj.numpy()
elif is_torch_tensor(__snake_case ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(__snake_case ):
return np.asarray(__snake_case )
else:
return obj
class lowerCAmelCase_ ( __lowercase ):
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = fields(self )
# Safety and consistency checks
if not len(_A ):
raise ValueError(F"""{self.__class__.__name__} has no fields.""" )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F"""{self.__class__.__name__} should not have more than one required field.""" )
_UpperCamelCase = getattr(self , class_fields[0].name )
_UpperCamelCase = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(_A ):
if isinstance(_A , _A ):
_UpperCamelCase = first_field.items()
_UpperCamelCase = True
else:
try:
_UpperCamelCase = iter(_A )
_UpperCamelCase = True
except TypeError:
_UpperCamelCase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_A ):
if (
not isinstance(_A , (list, tuple) )
or not len(_A ) == 2
or not isinstance(element[0] , _A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
_UpperCamelCase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
_UpperCamelCase = element[1]
elif first_field is not None:
_UpperCamelCase = first_field
else:
for field in class_fields:
_UpperCamelCase = getattr(self , field.name )
if v is not None:
_UpperCamelCase = v
def __delitem__( self : Any , *_A : Union[str, Any] , **_A : Tuple ):
raise Exception(F"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
def UpperCamelCase_ ( self : Optional[Any] , *_A : Dict , **_A : Any ):
raise Exception(F"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
def UpperCamelCase_ ( self : Dict , *_A : int , **_A : Any ):
raise Exception(F"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
def UpperCamelCase_ ( self : Union[str, Any] , *_A : List[str] , **_A : int ):
raise Exception(F"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
def __getitem__( self : Dict , _A : str ):
if isinstance(_A , _A ):
_UpperCamelCase = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : int , _A : Dict , _A : Optional[Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_A , _A )
super().__setattr__(_A , _A )
def __setitem__( self : List[str] , _A : Optional[int] , _A : Optional[Any] ):
# Will raise a KeyException if needed
super().__setitem__(_A , _A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_A , _A )
def UpperCamelCase_ ( self : Any ):
return tuple(self[k] for k in self.keys() )
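# A hedged sketch of the access pattern the output class above provides:
# fields are reachable as attributes, by string key, or by integer position,
# and `to_tuple()` keeps only the fields that are set. `MiniOutput` is a
# stripped-down illustration, not the full __post_init__ logic.
from collections import OrderedDict
from dataclasses import dataclass, fields
from typing import Optional


@dataclass
class MiniOutput(OrderedDict):
    loss: Optional[float] = None
    logits: Optional[list] = None

    def __post_init__(self):
        for f in fields(self):
            v = getattr(self, f.name)
            if v is not None:
                self[f.name] = v

    def __getitem__(self, k):
        if isinstance(k, str):
            return dict(self.items())[k]
        return self.to_tuple()[k]

    def to_tuple(self):
        return tuple(self[k] for k in self.keys())


if __name__ == "__main__":
    out = MiniOutput(logits=[0.1, 0.9])
    assert out.logits == out["logits"] == out[0]  # attribute, key, or position
    assert out.to_tuple() == ([0.1, 0.9],)  # the unset `loss` is skipped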
class lowerCAmelCase_ ( __lowercase, __lowercase ):
@classmethod
def UpperCamelCase_ ( cls : Optional[int] , _A : str ):
raise ValueError(
F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "longest"
UpperCAmelCase = "max_length"
UpperCAmelCase = "do_not_pad"
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "pt"
UpperCAmelCase = "tf"
UpperCAmelCase = "np"
UpperCAmelCase = "jax"
class lowerCAmelCase_ :
def __init__( self : Union[str, Any] , _A : List[ContextManager] ):
_UpperCamelCase = context_managers
_UpperCamelCase = ExitStack()
def __enter__( self : int ):
for context_manager in self.context_managers:
self.stack.enter_context(_A )
def __exit__( self : str , *_A : Dict , **_A : Tuple ):
self.stack.__exit__(*_A , **_A )
def _snake_case ( __snake_case ):
_UpperCamelCase = infer_framework(__snake_case )
if framework == "tf":
_UpperCamelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_UpperCamelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
_UpperCamelCase = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _snake_case ( __snake_case ):
_UpperCamelCase = model_class.__name__
_UpperCamelCase = infer_framework(__snake_case )
if framework == "tf":
_UpperCamelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
_UpperCamelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
_UpperCamelCase = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _snake_case ( __snake_case , __snake_case = "" , __snake_case = "." ):
def _flatten_dict(__snake_case , __snake_case="" , __snake_case="." ):
for k, v in d.items():
_UpperCamelCase = str(__snake_case ) + delimiter + str(__snake_case ) if parent_key else k
if v and isinstance(__snake_case , __snake_case ):
yield from flatten_dict(__snake_case , __snake_case , delimiter=__snake_case ).items()
else:
yield key, v
return dict(_flatten_dict(__snake_case , __snake_case , __snake_case ) )
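# A worked example of the flattening above: nested keys are joined with the
# delimiter into a single-level dict. `flatten_dict_demo` is an inline copy of
# the same recursion (the surrounding names are obfuscated), so the snippet
# runs on its own.
def flatten_dict_demo(d, parent_key="", delimiter="."):
    items = {}
    for k, v in d.items():
        key = str(parent_key) + delimiter + str(k) if parent_key else k
        if v and isinstance(v, dict):
            items.update(flatten_dict_demo(v, key, delimiter))
        else:
            items[key] = v
    return items


if __name__ == "__main__":
    nested = {"optimizer": {"lr": 1e-3, "betas": {"b1": 0.9}}, "epochs": 3}
    assert flatten_dict_demo(nested) == {
        "optimizer.lr": 1e-3,
        "optimizer.betas.b1": 0.9,
        "epochs": 3,
    }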
@contextmanager
def _snake_case ( __snake_case , __snake_case = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _snake_case ( __snake_case , __snake_case=None ):
if is_numpy_array(__snake_case ):
return np.transpose(__snake_case , axes=__snake_case )
elif is_torch_tensor(__snake_case ):
return array.T if axes is None else array.permute(*__snake_case )
elif is_tf_tensor(__snake_case ):
import tensorflow as tf
return tf.transpose(__snake_case , perm=__snake_case )
elif is_jax_tensor(__snake_case ):
return jnp.transpose(__snake_case , axes=__snake_case )
else:
raise ValueError(f"""Type not supported for transpose: {type(__snake_case )}.""" )
def _snake_case ( __snake_case , __snake_case ):
if is_numpy_array(__snake_case ):
return np.reshape(__snake_case , __snake_case )
elif is_torch_tensor(__snake_case ):
return array.reshape(*__snake_case )
elif is_tf_tensor(__snake_case ):
import tensorflow as tf
return tf.reshape(__snake_case , __snake_case )
elif is_jax_tensor(__snake_case ):
return jnp.reshape(__snake_case , __snake_case )
else:
raise ValueError(f"""Type not supported for reshape: {type(__snake_case )}.""" )
def _snake_case ( __snake_case , __snake_case=None ):
if is_numpy_array(__snake_case ):
return np.squeeze(__snake_case , axis=__snake_case )
elif is_torch_tensor(__snake_case ):
return array.squeeze() if axis is None else array.squeeze(dim=__snake_case )
elif is_tf_tensor(__snake_case ):
import tensorflow as tf
return tf.squeeze(__snake_case , axis=__snake_case )
elif is_jax_tensor(__snake_case ):
return jnp.squeeze(__snake_case , axis=__snake_case )
else:
raise ValueError(f"""Type not supported for squeeze: {type(__snake_case )}.""" )
def _snake_case ( __snake_case , __snake_case ):
if is_numpy_array(__snake_case ):
return np.expand_dims(__snake_case , __snake_case )
elif is_torch_tensor(__snake_case ):
return array.unsqueeze(dim=__snake_case )
elif is_tf_tensor(__snake_case ):
import tensorflow as tf
return tf.expand_dims(__snake_case , axis=__snake_case )
elif is_jax_tensor(__snake_case ):
return jnp.expand_dims(__snake_case , axis=__snake_case )
else:
raise ValueError(f"""Type not supported for expand_dims: {type(__snake_case )}.""" )
def _snake_case ( __snake_case ):
if is_numpy_array(__snake_case ):
return np.size(__snake_case )
elif is_torch_tensor(__snake_case ):
return array.numel()
elif is_tf_tensor(__snake_case ):
import tensorflow as tf
return tf.size(__snake_case )
elif is_jax_tensor(__snake_case ):
return array.size
else:
raise ValueError(f"""Type not supported for expand_dims: {type(__snake_case )}.""" )
def _snake_case ( __snake_case , __snake_case ):
for key, value in auto_map.items():
if isinstance(__snake_case , (tuple, list) ):
_UpperCamelCase = [f"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
_UpperCamelCase = f"""{repo_id}--{value}"""
return auto_map
def _snake_case ( __snake_case ):
for base_class in inspect.getmro(__snake_case ):
_UpperCamelCase = base_class.__module__
_UpperCamelCase = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 10 | import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = DebertaVaTokenizer(_A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
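# A hedged sketch of the sequence layouts asserted in the special-tokens test
# above: a single sequence becomes [CLS] A [SEP], a pair becomes
# [CLS] A [SEP] B [SEP]. The token ids (1 for [CLS], 2 for [SEP]) are
# illustrative assumptions, not the tokenizer's real vocabulary.
def build_inputs_demo(ids_a, ids_b=None, cls_id=1, sep_id=2):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


if __name__ == "__main__":
    assert build_inputs_demo([10, 11]) == [1, 10, 11, 2]
    assert build_inputs_demo([10, 11], [12]) == [1, 10, 11, 2, 12, 2]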
| 10 | 1 |
| 10 | import sys
from collections import defaultdict
class lowerCAmelCase_ :
def __init__( self : Optional[int] ):
_UpperCamelCase = []
def UpperCamelCase_ ( self : Any , _A : str ):
return self.node_position[vertex]
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ):
_UpperCamelCase = pos
def UpperCamelCase_ ( self : Any , _A : List[str] , _A : int , _A : Optional[Any] , _A : Union[str, Any] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCamelCase = 2 * start + 1
else:
_UpperCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child]
_UpperCamelCase , _UpperCamelCase = (
heap[start],
positions[start],
)
_UpperCamelCase , _UpperCamelCase = temp, tempa
_UpperCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _A )
self.top_to_bottom(_A , _A , _A , _A )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Optional[int] ):
_UpperCamelCase = position[index]
while index != 0:
_UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_UpperCamelCase = heap[parent]
_UpperCamelCase = position[parent]
self.set_position(position[parent] , _A )
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , _A )
break
_UpperCamelCase = parent
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , 0 )
def UpperCamelCase_ ( self : int , _A : Tuple , _A : int ):
_UpperCamelCase = len(_A ) // 2 - 1
for i in range(_A , -1 , -1 ):
self.top_to_bottom(_A , _A , len(_A ) , _A )
def UpperCamelCase_ ( self : Any , _A : int , _A : List[str] ):
_UpperCamelCase = positions[0]
_UpperCamelCase = sys.maxsize
self.top_to_bottom(_A , 0 , len(_A ) , _A )
return temp
def _snake_case ( __snake_case ):
_UpperCamelCase = Heap()
_UpperCamelCase = [0] * len(__snake_case )
_UpperCamelCase = [-1] * len(__snake_case ) # neighboring tree vertex of each selected vertex
# minimum distance of each explored vertex from a neighboring vertex of the
# partial tree formed so far in the graph
_UpperCamelCase = [] # heap of distances of vertices from their neighboring tree vertex
_UpperCamelCase = []
for vertex in range(len(__snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(__snake_case )
heap.node_position.append(__snake_case )
_UpperCamelCase = []
_UpperCamelCase = 1
_UpperCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCamelCase = 0
_UpperCamelCase = distance
heap.heapify(__snake_case , __snake_case )
for _ in range(1 , len(__snake_case ) ):
_UpperCamelCase = heap.delete_minimum(__snake_case , __snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__snake_case )]
):
_UpperCamelCase = distance
heap.bottom_to_top(
__snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
_UpperCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowerCAmelCase = int(input("Enter number of edges: ").strip())
_lowerCAmelCase = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
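# A worked usage example for the routine above. Because the surrounding code
# uses placeholder names, this sketch re-implements Prim's algorithm with
# `heapq` (an assumption, not the custom heap above) on the same
# adjacency-list format, and checks the classic 4-vertex answer.
import heapq
from collections import defaultdict


def prim_mst_edges(adjacency_list):
    visited = {0}
    # heap of (weight, from_vertex, to_vertex), seeded from vertex 0
    frontier = [(w, 0, v) for v, w in adjacency_list[0]]
    heapq.heapify(frontier)
    tree_edges = []
    while frontier and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v))
        for nbr, w in adjacency_list[v]:
            if nbr not in visited:
                heapq.heappush(frontier, (w, v, nbr))
    return tree_edges


if __name__ == "__main__":
    adjacency_list = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 3, 4), (2, 3, 3)]:
        adjacency_list[u].append([v, w])
        adjacency_list[v].append([u, w])
    assert sorted(prim_mst_edges(adjacency_list)) == [(0, 1), (1, 2), (2, 3)]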
| 10 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_lowerCAmelCase = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
_lowerCAmelCase = f'https://www.google.com/search?q={query}&num=100'
_lowerCAmelCase = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
_lowerCAmelCase = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
_lowerCAmelCase = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 10 | import logging
import os
from .state import PartialState
class lowerCAmelCase_ ( logging.LoggerAdapter ):
@staticmethod
def UpperCamelCase_ ( _A : Any ):
_UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : str , *_A : int , **_A : List[Any] ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCamelCase = kwargs.pop('''main_process_only''' , _A )
_UpperCamelCase = kwargs.pop('''in_order''' , _A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
elif in_order:
_UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
state.wait_for_everyone()
def _snake_case ( name , log_level = None ):
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
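# A hedged usage sketch for the logger factory above (identifiers follow this
# file's renamed definitions; an accelerate state must be initialized first):
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()            # populates PartialState._shared_state
#     logger = _snake_case(__name__, "INFO")
#     logger.info("printed on the main process only", main_process_only=True)
#     logger.info("printed on every rank, in order", main_process_only=False, in_order=True)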
| 10 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ):
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
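# A standalone sketch of the resize rule implemented by the helper above:
# pick the single scale factor closest to 1 (so the image changes as little
# as possible), then snap both sides to the configured multiple.
def _demo_snap(val: float, multiple: int) -> int:
    return round(val / multiple) * multiple

in_h, in_w, target, mult = 480, 640, 384, 32
scale = min(target / in_h, target / in_w, key=lambda s: abs(1 - s))
print(_demo_snap(scale * in_h, mult), _demo_snap(scale * in_w, mult))  # 384 512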
| 10 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = BertGenerationTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''<s>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''Hello World!'''
_UpperCamelCase = [1_8536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
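# NOTE: "exsist" above is deliberate; the expected ids below encode this exact string.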
_UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCamelCase = ''' '''.join(_A )
_UpperCamelCase = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = BertGenerationConfig()
_UpperCamelCase = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
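# A quick sentencepiece round-trip with the same fixture model (the relative
# path is an assumption; adjust to wherever the fixture resolves on disk):
#
#     import sentencepiece as spm
#     sp = spm.SentencePieceProcessor(model_file="fixtures/test_sentencepiece.model")
#     pieces = sp.encode("This is a test", out_type=str)
#     print(pieces)             # ['▁This', '▁is', '▁a', '▁t', 'est']
#     print(sp.decode(pieces))  # 'This is a test'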
| 10 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = "▁"
_lowerCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
_lowerCAmelCase = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
_lowerCAmelCase = {
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
_lowerCAmelCase = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = []
UpperCAmelCase = []
def __init__( self : int , _A : Union[str, Any] , _A : Optional[int]="<s>" , _A : str="</s>" , _A : Optional[int]="</s>" , _A : Tuple="<s>" , _A : Dict="<unk>" , _A : List[str]="<pad>" , _A : Dict="<mask>" , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Any=None , _A : Optional[Dict[str, Any]] = None , _A : List[str]=None , _A : str=False , **_A : int , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCamelCase = legacy_behaviour
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , tokenizer_file=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_A , **_A , )
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
_UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase = 1
_UpperCamelCase = len(self.sp_model )
_UpperCamelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A )
}
_UpperCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCamelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_UpperCamelCase = src_lang if src_lang is not None else '''eng_Latn'''
_UpperCamelCase = self.lang_code_to_id[self._src_lang]
_UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ):
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
_UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , _A : Any ):
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCamelCase_ ( self : Optional[int] ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase_ ( self : int ):
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self : int , _A : str ):
_UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase_ ( self : str , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
_UpperCamelCase = [1] * len(self.prefix_tokens )
_UpperCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def UpperCamelCase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ):
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self : Optional[int] , _A : str , _A : str , _A : Optional[str] , _A : Optional[str] , **_A : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_UpperCamelCase = src_lang
_UpperCamelCase = self(_A , add_special_tokens=_A , return_tensors=_A , **_A )
_UpperCamelCase = self.convert_tokens_to_ids(_A )
_UpperCamelCase = tgt_lang_id
return inputs
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : int , _A : str ):
return self.sp_model.encode(_A , out_type=_A )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase_ ( self : Union[str, Any] , _A : str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self : List[Any] , _A : Dict ):
_UpperCamelCase = ''''''.join(_A ).replace(_A , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ):
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str] , _A : str = "eng_Latn" , _A : Optional[List[str]] = None , _A : str = "fra_Latn" , **_A : Optional[int] , ):
_UpperCamelCase = src_lang
_UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(_A , _A , **_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self : Optional[int] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self : Optional[int] , _A : Tuple ):
_UpperCamelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_UpperCamelCase = []
_UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase = [self.cur_lang_code]
_UpperCamelCase = [self.eos_token_id]
def UpperCamelCase_ ( self : Optional[Any] , _A : str ):
_UpperCamelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_UpperCamelCase = []
_UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase = [self.cur_lang_code]
_UpperCamelCase = [self.eos_token_id]
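# Hedged end-to-end sketch of the language-code plumbing above, using the
# published checkpoint named in PRETRAINED_VOCAB_FILES_MAP (network required):
#
#     from transformers import NllbTokenizer
#     tok = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tok("Hello world", return_tensors="pt")
#     # set_src_lang_special_tokens wraps the input with the eng_Latn code and
#     # </s>; generation is then steered to French with
#     # forced_bos_token_id=tok.lang_code_to_id["fra_Latn"].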
| 10 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase = pipe('''anime turle''' , generator=_A , output_type='''np''' )  # [sic] the stored reference was produced with this exact prompt
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
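# The memory-saving recipe exercised above, as a standalone sketch (checkpoint
# id copied from the test; fp16 weights and a CUDA device are assumptions):
#
#     import torch
#     from diffusers import StableUnCLIPPipeline
#     pipe = StableUnCLIPPipeline.from_pretrained(
#         "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
#     )
#     pipe.enable_attention_slicing()        # chunk attention to cut peak VRAM
#     pipe.enable_sequential_cpu_offload()   # stream submodules to GPU on demand
#     image = pipe("anime turtle", num_inference_steps=20).images[0]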
| 10 | 1 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowerCAmelCase_ ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
UpperCAmelCase = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def _snake_case ( ):
if os.name == "nt":
_UpperCamelCase = CursorInfo()
_UpperCamelCase = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__snake_case , ctypes.byref(__snake_case ) )
_UpperCamelCase = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__snake_case , ctypes.byref(__snake_case ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def _snake_case ( ):
if os.name == "nt":
_UpperCamelCase = CursorInfo()
_UpperCamelCase = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__snake_case , ctypes.byref(__snake_case ) )
_UpperCamelCase = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__snake_case , ctypes.byref(__snake_case ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def _snake_case ( ):
try:
hide_cursor()
yield
finally:
show_cursor()
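# A self-contained POSIX-only equivalent of the context manager above (the
# Windows ctypes branch is omitted; ANSI escape codes hide/show the cursor):
import sys
from contextlib import contextmanager

@contextmanager
def hidden_cursor():
    sys.stdout.write("\033[?25l")  # hide
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")  # show again, even if the body raised
        sys.stdout.flush()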
| 10 | from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case ( __snake_case , __snake_case ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__snake_case , __snake_case ) ) )
def _snake_case ( __snake_case , __snake_case ):
if dataset.ndim != value_array.ndim:
_UpperCamelCase = (
'''Wrong input data\'s dimensions... '''
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__snake_case )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCamelCase = (
'''Wrong input data\'s shape... '''
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__snake_case )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
_UpperCamelCase = (
'''Input data have different datatype... '''
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__snake_case )
_UpperCamelCase = []
for value in value_array:
_UpperCamelCase = euclidean(__snake_case , dataset[0] )
_UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCamelCase = euclidean(__snake_case , __snake_case )
if dist > temp_dist:
_UpperCamelCase = temp_dist
_UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _snake_case ( __snake_case , __snake_case ):
return np.dot(__snake_case , __snake_case ) / (norm(__snake_case ) * norm(__snake_case ))
if __name__ == "__main__":
import doctest
doctest.testmod()
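# Worked example for the two metrics above: for a = (1, 0) and b = (0, 1),
# euclidean distance is sqrt(2) ≈ 1.4142 while cosine similarity is 0
# (orthogonal vectors); for a = (1, 1) and b = (2, 2) the distance is sqrt(2)
# but cosine similarity is 1, since the vectors are parallel.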
| 10 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "instructblip_vision_model"
def __init__( self : str , _A : Dict=1408 , _A : Union[str, Any]=6144 , _A : Union[str, Any]=39 , _A : int=16 , _A : Dict=224 , _A : Dict=14 , _A : Any="gelu" , _A : Dict=1e-6 , _A : List[str]=0.0 , _A : List[str]=1e-10 , _A : List[str]=True , **_A : Dict , ):
super().__init__(**_A )
_UpperCamelCase = hidden_size
_UpperCamelCase = intermediate_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = patch_size
_UpperCamelCase = image_size
_UpperCamelCase = initializer_range
_UpperCamelCase = attention_dropout
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = hidden_act
_UpperCamelCase = qkv_bias
@classmethod
def UpperCamelCase_ ( cls : Dict , _A : Union[str, os.PathLike] , **_A : Union[str, Any] ):
cls._set_token_in_kwargs(_A )
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(_A , **_A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_UpperCamelCase = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "instructblip_qformer"
def __init__( self : List[str] , _A : int=3_0522 , _A : Tuple=768 , _A : List[str]=12 , _A : List[Any]=12 , _A : Any=3072 , _A : int="gelu" , _A : Union[str, Any]=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Any=0.02 , _A : Optional[int]=1e-12 , _A : Tuple=0 , _A : Any="absolute" , _A : Dict=2 , _A : Dict=1408 , **_A : Optional[Any] , ):
super().__init__(pad_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = cross_attention_frequency
_UpperCamelCase = encoder_hidden_size
@classmethod
def UpperCamelCase_ ( cls : List[str] , _A : Union[str, os.PathLike] , **_A : Tuple ):
cls._set_token_in_kwargs(_A )
_UpperCamelCase , _UpperCamelCase = cls.get_config_dict(_A , **_A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
_UpperCamelCase = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_A , **_A )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "instructblip"
UpperCAmelCase = True
def __init__( self : Tuple , _A : List[str]=None , _A : str=None , _A : Optional[int]=None , _A : Any=32 , **_A : Dict ):
super().__init__(**_A )
if vision_config is None:
_UpperCamelCase = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
_UpperCamelCase = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
_UpperCamelCase = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
_UpperCamelCase = InstructBlipVisionConfig(**_A )
_UpperCamelCase = InstructBlipQFormerConfig(**_A )
_UpperCamelCase = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
_UpperCamelCase = CONFIG_MAPPING[text_model_type](**_A )
_UpperCamelCase = self.text_config.tie_word_embeddings
_UpperCamelCase = self.text_config.is_encoder_decoder
_UpperCamelCase = num_query_tokens
_UpperCamelCase = self.vision_config.hidden_size
_UpperCamelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_UpperCamelCase = 1.0
_UpperCamelCase = 0.02
@classmethod
def UpperCamelCase_ ( cls : str , _A : InstructBlipVisionConfig , _A : InstructBlipQFormerConfig , _A : PretrainedConfig , **_A : Tuple , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_A , )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.vision_config.to_dict()
_UpperCamelCase = self.qformer_config.to_dict()
_UpperCamelCase = self.text_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
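# Hedged composition sketch mirroring from_vision_qformer_text_configs above
# (tiny sizes for illustration; kwarg names follow the __init__ signatures):
#
#     from transformers import (
#         InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, OPTConfig,
#     )
#     vision = InstructBlipVisionConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2)
#     qformer = InstructBlipQFormerConfig(hidden_size=32, encoder_hidden_size=32)
#     text = OPTConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2)
#     cfg = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text, num_query_tokens=8)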
| 10 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
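# Shap-E renders a short turntable: 20 RGB frames per prompt at the requested
# frame_size, hence the (20, 64, 64, 3) shape asserted above. The frames can
# be stitched into a GIF, e.g. with the diffusers helper (an assumption):
#
#     from diffusers.utils import export_to_gif
#     export_to_gif(list(images), "shark.gif")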
| 10 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 10 | import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCAmelCase = HfApi()
_lowerCAmelCase = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
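# The regression check at the heart of the loop above, in isolation: a fixed
# seed makes the forward pass deterministic, so a stored 30-value slice of the
# logits can be compared within a small absolute tolerance.
import torch

logits = torch.randn(1, 3, 32, 32)
reference = logits[0, 0, 0, :30].clone()  # stands in for the hard-coded tensors above
assert torch.allclose(logits[0, 0, 0, :30], reference, atol=1e-3)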
| 10 | 1 |
def _snake_case ( __snake_case ):
_UpperCamelCase = [0] * len(__snake_case )
for i in range(1 , len(__snake_case ) ):
# use last results for better performance - dynamic programming
_UpperCamelCase = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_UpperCamelCase = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_UpperCamelCase = j
return prefix_result
def _snake_case ( __snake_case ):
return max(prefix_function(__snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
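# Worked example: for "abcabcd" the prefix function yields
# [0, 0, 0, 1, 2, 3, 0]; pi[i] is the length of the longest proper prefix of
# s[:i+1] that is also its suffix, and the maximum, 3 ("abc"), is exactly what
# the second helper returns.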
| 10 | from typing import List
from .keymap import KEYMAP, get_character
def _snake_case ( __snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += [key]
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
def _snake_case ( *__snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += keys
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
class lowerCAmelCase_ ( __lowercase ):
def __new__( cls : Optional[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] ):
_UpperCamelCase = super().__new__(cls , _A , _A , _A )
if not hasattr(_A , '''key_handler''' ):
setattr(_A , '''key_handler''' , {} )
setattr(_A , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCamelCase = getattr(_A , '''handle_key''' , [] )
for key in handled_keys:
_UpperCamelCase = value
return new_cls
@staticmethod
def UpperCamelCase_ ( cls : str ):
_UpperCamelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCamelCase = ord(_A )
_UpperCamelCase = cls.key_handler.get(_A )
if handler:
_UpperCamelCase = char
return handler(cls )
else:
return None
def _snake_case ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
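# Intended wiring for the decorators above (names as in the un-renamed
# accelerate source, an assumption):
#
#     @register                      # the module-level factory defined last
#     class Menu:
#         @mark("down")              # first decorator: a single key
#         def down(self): ...
#         @mark_multiple("up", "k")  # second decorator: several keys
#         def up(self): ...
#
# The metaclass collects every method tagged with `handle_key` into
# `key_handler`, and `handle_input` reads one keypress via get_character()
# and dispatches it to the matching method.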
| 10 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCAmelCase = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
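# The lazy-import pattern above defers the heavy torch/tf submodules until an
# attribute is first accessed; a minimal standalone illustration of the idea:
import importlib

class TinyLazyModule:
    def __init__(self, name: str):
        self._name = name
        self._module = None

    def __getattr__(self, attr: str):
        if self._module is None:  # import on first attribute access only
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

lazy_math = TinyLazyModule("math")  # nothing imported yet
print(lazy_math.pi)                 # triggers the real import, prints 3.14159...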
| 10 | import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCamelCase_ ( self : int , _A : str , _A : Union[str, Any] , _A : Any ):
_UpperCamelCase = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_UpperCamelCase = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_UpperCamelCase = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any] ):
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_text=_A , return_tensors=_A )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_UpperCamelCase = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_UpperCamelCase = logging.get_logger('''transformers.generation.utils''' )
_UpperCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
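Outside the harness, the pipeline exercised by these tests can be driven directly; a minimal sketch using the same tiny checkpoint the tests load:

from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
result = generator("Hello I believe in", do_sample=False, max_new_tokens=5)
print(result[0]["generated_text"])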
| 10 | 1 |
def _snake_case ( __snake_case , __snake_case ):
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(__snake_case ):
for j in range(__snake_case ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = [[float('''inf''' ) for _ in range(__snake_case )] for _ in range(__snake_case )]
for i in range(__snake_case ):
for j in range(__snake_case ):
_UpperCamelCase = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__snake_case ):
# looping through rows of graph array
for i in range(__snake_case ):
# looping through columns of graph array
for j in range(__snake_case ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_UpperCamelCase = dist[i][k] + dist[k][j]
_print_dist(__snake_case , __snake_case )
return dist, v
if __name__ == "__main__":
_lowerCAmelCase = int(input("Enter number of vertices: "))
_lowerCAmelCase = int(input("Enter number of edges: "))
_lowerCAmelCase = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
_lowerCAmelCase = 0.0
# src and dst are vertex indices that must lie within the range [0, v)
# out-of-range values will raise an IndexError
for i in range(e):
print("\nEdge ", i + 1)
_lowerCAmelCase = int(input("Enter source:"))
_lowerCAmelCase = int(input("Enter destination:"))
_lowerCAmelCase = float(input("Enter weight:"))
_lowerCAmelCase = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
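A non-interactive sanity check mirroring the commented session above, assuming the two functions keep their original names (`floyd_warshall`, with `_print_dist` as its helper) rather than the mangled ones:

INF = float("inf")
demo_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
demo_dist, _ = floyd_warshall(demo_graph, 3)  # also prints the matrix shown above
assert demo_dist[1][2] == 2.0 and demo_dist[2][1] == 1.0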
| 10 | def _snake_case ( __snake_case = 100 ):
_UpperCamelCase = (n * (n + 1) // 2) ** 2
_UpperCamelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
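A brute-force cross-check of the closed forms above (the first is the square of the sum, the second the sum of the squares), assuming the function keeps the name `solution` that the call above already uses:

def brute_force(n):
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))

assert brute_force(10) == 2640  # 55**2 - 385
assert brute_force(100) == solution(100) == 25164150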
| 10 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
class lowerCAmelCase_ ( folder_based_builder.FolderBasedBuilderConfig ):
UpperCAmelCase = None
UpperCAmelCase = None
class lowerCAmelCase_ ( folder_based_builder.FolderBasedBuilder ):
UpperCAmelCase = datasets.Audio()
UpperCAmelCase = "audio"
UpperCAmelCase = AudioFolderConfig
UpperCAmelCase = 42 # definition at the bottom of the script
UpperCAmelCase = AudioClassification(audio_column="audio", label_column="label" )
_lowerCAmelCase = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
_lowerCAmelCase = AUDIO_EXTENSIONS
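In practice this builder is reached through `datasets.load_dataset`; a hedged usage sketch (the directory name and layout below are illustrative):

# my_audio/train/dog/1.wav, my_audio/train/cat/2.wav, ...
from datasets import load_dataset

ds = load_dataset("audiofolder", data_dir="my_audio")
sample = ds["train"][0]
print(sample["audio"]["sampling_rate"], sample["label"])  # label inferred from the folder name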
| 10 | import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
def constraint_to_multiple_of(__snake_case , __snake_case , __snake_case=0 , __snake_case=None ):
_UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
_UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
_UpperCamelCase = (output_size, output_size) if isinstance(__snake_case , __snake_case ) else output_size
_UpperCamelCase , _UpperCamelCase = get_image_size(__snake_case )
_UpperCamelCase , _UpperCamelCase = output_size
# determine new height and width
_UpperCamelCase = output_height / input_height
_UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_UpperCamelCase = scale_width
else:
# fit height
_UpperCamelCase = scale_height
_UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=__snake_case )
_UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=__snake_case )
return (new_height, new_width)
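Worked example of the rule above: with a 384x384 target, keep_aspect_ratio on, and a multiple of 32, a 480x640 input keeps the height scale (0.8 is closer to 1 than the width scale 0.6) and snaps both sides to multiples of 32. A standalone restatement, simplified to drop the min/max clamping:

def snap(val, multiple=32):
    # simplified constraint_to_multiple_of: plain rounding, no min/max clamping
    return round(val / multiple) * multiple

in_h, in_w, target = 480, 640, 384
scale = min(target / in_h, target / in_w, key=lambda s: abs(1 - s))
print(snap(scale * in_h), snap(scale * in_w))  # -> 384 512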
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
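End to end, the class above mirrors transformers' DPT image processor; a hedged sketch of equivalent usage (requires transformers, torch, and Pillow):

import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(
    size={"height": 384, "width": 384}, keep_aspect_ratio=True, ensure_multiple_of=32
)
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 384, 512]) under these settings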
| 10 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCAmelCase_ :
def __init__( self : Dict , _A : Optional[Any] , _A : Dict=13 , _A : int=7 , _A : List[str]=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : List[Any]=99 , _A : Any=32 , _A : Union[str, Any]=2 , _A : Optional[int]=4 , _A : int=37 , _A : Any="gelu" , _A : int=0.1 , _A : Dict=0.1 , _A : Any=512 , _A : List[Any]=16 , _A : Tuple=2 , _A : List[Any]=0.02 , _A : List[Any]=False , _A : int=True , _A : Union[str, Any]="None" , _A : Optional[Any]=3 , _A : Dict=4 , _A : Any=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : List[str] , _A : Optional[Any] , _A : Tuple , _A : Optional[Any] , _A : str , _A : str , _A : Optional[Any] , _A : Dict ):
_UpperCamelCase = TFDebertaVaModel(config=_A )
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_UpperCamelCase = [input_ids, input_mask]
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[Any] , _A : int , _A : List[str] , _A : str , _A : Tuple , _A : List[Any] , _A : Dict , _A : Union[str, Any] ):
_UpperCamelCase = TFDebertaVaForMaskedLM(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : int , _A : str , _A : str , _A : Union[str, Any] , _A : int , _A : Optional[Any] , _A : str , _A : Dict ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForSequenceClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : List[Any] , _A : List[str] , _A : Tuple , _A : int , _A : int , _A : Tuple , _A : Tuple , _A : int ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = TFDebertaVaForTokenClassification(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Dict , _A : Any , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Tuple , _A : Any , _A : Union[str, Any] ):
_UpperCamelCase = TFDebertaVaForQuestionAnswering(config=_A )
_UpperCamelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = TFDebertaVaModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(_A )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def UpperCamelCase_ ( self : int ):
pass
@slow
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
_UpperCamelCase = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_UpperCamelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase = model(_A , attention_mask=_A )[0]
_UpperCamelCase = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , _A , atol=1e-4 )
| 10 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Optional[Any] , _A : List[str] , _A : Optional[int]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 10 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "speech_to_text_2"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Dict , _A : Dict=1_0000 , _A : Any=6 , _A : Optional[int]=2048 , _A : str=4 , _A : int=0.0 , _A : Tuple=True , _A : Dict="relu" , _A : Optional[Any]=256 , _A : str=0.1 , _A : List[Any]=0.0 , _A : Tuple=0.0 , _A : Optional[int]=0.02 , _A : Optional[Any]=2 , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : Optional[int]=0 , _A : List[Any]=2 , _A : str=1024 , **_A : str , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = decoder_layers
_UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase = max_target_positions
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
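The `attribute_map` above transparently redirects the generic attribute names to this config's own fields; a quick illustration (assumes transformers is installed):

from transformers import Speech2Text2Config

cfg = Speech2Text2Config(d_model=256, decoder_attention_heads=4)
assert cfg.hidden_size == 256         # redirected to d_model
assert cfg.num_attention_heads == 4   # redirected to decoder_attention_heads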
| 10 | import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
# split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
_UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
_UpperCamelCase = self.processor.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.processor.pad(
labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
_UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_UpperCamelCase = labels
return batch
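The masking step above in miniature: label positions where the attention mask is 0 (padding) are overwritten with -100 so the loss computation ignores them:

import torch

label_ids = torch.tensor([[5, 9, 2, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
labels = label_ids.masked_fill(attention_mask.ne(1), -100)
print(labels)  # tensor([[   5,    9,    2, -100, -100]])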
class lowerCAmelCase_ ( __lowercase ):
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
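A typical launch of a script like this one; every path and flag value below is illustrative, not prescribed by the script:

# python run_common_voice.py \
#     --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#     --dataset_config_name tr \
#     --output_dir ./wav2vec2-xlsr-turkish \
#     --num_train_epochs 5 \
#     --per_device_train_batch_size 16 \
#     --freeze_feature_extractor \
#     --do_train --do_eval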
| 10 | 1 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = LayoutLMTokenizer
UpperCAmelCase = LayoutLMTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
_UpperCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCamelCase_ ( self : Optional[int] , **_A : Union[str, Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : int , _A : Any ):
_UpperCamelCase = '''UNwant\u00E9d,running'''
_UpperCamelCase = '''unwanted, running'''
return input_text, output_text
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.tokenizer_class(self.vocab_file )
_UpperCamelCase = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ ( self : List[str] ):
pass
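The split the test expects comes from greedy longest-match WordPiece; a minimal sketch of that stage (basic tokenization, i.e. lowercasing, accent stripping, and punctuation splitting, is assumed to have already produced each word):

def wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            sub = word[start:end]
            if start > 0:
                sub = "##" + sub  # continuation pieces carry the ## prefix
            if sub in vocab:
                pieces.append(sub)
                break
            end -= 1
        else:  # no substring matched: the whole word becomes [UNK]
            return [unk]
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))  # ['un', '##want', '##ed']
print(wordpiece("running", vocab))   # ['runn', '##ing']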
| 10 | import math
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : int=0 ): # a graph with Node 0,1,...,N-1
_UpperCamelCase = n
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # adjacency matrix for weight
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase_ ( self : Dict , _A : str , _A : List[str] , _A : Optional[Any] ):
_UpperCamelCase = w
def UpperCamelCase_ ( self : Optional[int] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] , _A : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
print(graph.show_min(1, 4))  # shortest path 1 -> 3 -> 4, cost 11
print(graph.show_min(0, 3))  # shortest path 0 -> 2 -> 3, cost 16
| 10 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
def __init__( self : Union[str, Any] , _A : List[Any] , _A : Union[str, Any]=13 , _A : int=30 , _A : Union[str, Any]=2 , _A : Tuple=3 , _A : str=True , _A : Optional[int]=True , _A : Optional[int]=32 , _A : Dict=2 , _A : Optional[Any]=4 , _A : Optional[int]=37 , _A : Tuple="gelu" , _A : Tuple=0.1 , _A : Optional[int]=0.1 , _A : Any=10 , _A : Any=0.02 , _A : Union[str, Any]=3 , _A : str=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCamelCase = (image_size // patch_size) ** 2
_UpperCamelCase = num_patches + 1
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCamelCase_ ( self : int ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : int , _A : Dict ):
_UpperCamelCase = TFViTModel(config=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
_UpperCamelCase = self.image_size // 2
_UpperCamelCase = pixel_values[:, :, :image_size, :image_size]
_UpperCamelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
_UpperCamelCase = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : int , _A : Any , _A : Union[str, Any] , _A : List[str] ):
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = TFViTForImageClassification(_A )
_UpperCamelCase = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
_UpperCamelCase = self.image_size // 2
_UpperCamelCase = pixel_values[:, :, :image_size, :image_size]
_UpperCamelCase = model(_A , interpolate_pos_encoding=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCamelCase = 1
_UpperCamelCase = TFViTForImageClassification(_A )
_UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCAmelCase = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = TFViTModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase_ ( self : List[str] ):
pass
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(_A )
_UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_A )
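# Loads the test image used by the integration test below (the standard COCO fixture with two cats).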
def _snake_case ( ):
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : List[Any] ):
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
_UpperCamelCase = model(**_A )
# verify the logits
_UpperCamelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
_UpperCamelCase = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1e-4 )
| 10 | import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
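# Helper: a dataclass field whose default is a list, with optional help metadata attached.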
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(
default=[], metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
}, )
UpperCAmelCase = list_field(
default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase = list_field(
default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Benchmark training of model"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Trace memory line by line"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to print environment information"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
}, )
UpperCAmelCase = field(
default=F"""inference_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv."}, )
UpperCAmelCase = field(
default=F"""inference_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv."}, )
UpperCAmelCase = field(
default=F"""train_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
UpperCAmelCase = field(
default=F"""train_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
UpperCAmelCase = field(
default=F"""env_info_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving environment information."}, )
UpperCAmelCase = field(
default=F"""log_{round(time() )}.csv""", metadata={"help": "Log filename used if print statements are saved in log."}, )
UpperCAmelCase = field(default=3, metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
}, )
def UpperCamelCase_ ( self : Union[str, Any] ):
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , FutureWarning , )
def UpperCamelCase_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
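    # Multiprocessing is used for the measurements except on TPU, where it is not supported.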
@property
def UpperCamelCase_ ( self : Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 10 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
# Load configuration defined in the metadata file
with open(__snake_case ) as metadata_file:
_UpperCamelCase = json.load(__snake_case )
_UpperCamelCase = LukeConfig(use_entity_aware_attention=__snake_case , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCamelCase = torch.load(__snake_case , map_location='''cpu''' )['''module''']
# Load the entity vocab file
_UpperCamelCase = load_original_entity_vocab(__snake_case )
# add an entry for [MASK2]
_UpperCamelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_UpperCamelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken('''<ent>''' , lstrip=__snake_case , rstrip=__snake_case )
    entity_token_b = AddedToken('''<ent2>''' , lstrip=__snake_case , rstrip=__snake_case )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_b]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__snake_case )
with open(os.path.join(__snake_case , '''tokenizer_config.json''' ) , '''r''' ) as f:
_UpperCamelCase = json.load(__snake_case )
_UpperCamelCase = '''MLukeTokenizer'''
with open(os.path.join(__snake_case , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(__snake_case , __snake_case )
with open(os.path.join(__snake_case , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(__snake_case , __snake_case )
_UpperCamelCase = MLukeTokenizer.from_pretrained(__snake_case )
# Initialize the embeddings of the special tokens
_UpperCamelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_UpperCamelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_UpperCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_UpperCamelCase = word_emb[ent_init_index].unsqueeze(0 )
_UpperCamelCase = word_emb[enta_init_index].unsqueeze(0 )
_UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_UpperCamelCase = state_dict[bias_name]
_UpperCamelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_UpperCamelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_UpperCamelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase = f"""encoder.layer.{layer_index}.attention.self."""
_UpperCamelCase = state_dict[prefix + matrix_name]
_UpperCamelCase = state_dict[prefix + matrix_name]
_UpperCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCamelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCamelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_UpperCamelCase = state_dict['''entity_predictions.bias''']
_UpperCamelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_UpperCamelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_UpperCamelCase = LukeForMaskedLM(config=__snake_case ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_UpperCamelCase = state_dict[key]
else:
_UpperCamelCase = state_dict[key]
_UpperCamelCase , _UpperCamelCase = model.load_state_dict(__snake_case , strict=__snake_case )
if set(__snake_case ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__snake_case ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_UpperCamelCase = MLukeTokenizer.from_pretrained(__snake_case , task='''entity_classification''' )
_UpperCamelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_UpperCamelCase = (0, 9)
_UpperCamelCase = tokenizer(__snake_case , entity_spans=[span] , return_tensors='''pt''' )
_UpperCamelCase = model(**__snake_case )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase = torch.Size((1, 33, 768) )
_UpperCamelCase = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_UpperCamelCase = torch.Size((1, 1, 768) )
_UpperCamelCase = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __snake_case , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_UpperCamelCase = MLukeTokenizer.from_pretrained(__snake_case )
_UpperCamelCase = '''Tokyo is the capital of <mask>.'''
_UpperCamelCase = (24, 30)
_UpperCamelCase = tokenizer(__snake_case , entity_spans=[span] , return_tensors='''pt''' )
_UpperCamelCase = model(**__snake_case )
_UpperCamelCase = encoding['''input_ids'''][0].tolist()
_UpperCamelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_UpperCamelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__snake_case )
_UpperCamelCase = outputs.entity_logits[0][0].argmax().item()
_UpperCamelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__snake_case ) )
model.save_pretrained(__snake_case )
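# Parses the original entity vocab (one JSON record per line) into a mapping from "<language>:<entity name>" to id; special tokens keep their plain names.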
def _snake_case ( __snake_case ):
_UpperCamelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    _UpperCamelCase = [json.loads(line ) for line in open(__snake_case )]
_UpperCamelCase = {}
for entry in data:
_UpperCamelCase = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_UpperCamelCase = entity_id
break
_UpperCamelCase = f"""{language}:{entity_name}"""
_UpperCamelCase = entity_id
return new_mapping
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
_lowerCAmelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 10 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *__snake_case , __snake_case = None , __snake_case=True , __snake_case=2 ):
from .. import __version__
_UpperCamelCase = take_from
_UpperCamelCase = ()
if not isinstance(args[0] , __snake_case ):
_UpperCamelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__snake_case ).base_version ) >= version.parse(__snake_case ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
_UpperCamelCase = None
if isinstance(__snake_case , __snake_case ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__snake_case ),)
_UpperCamelCase = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__snake_case , __snake_case ):
values += (getattr(__snake_case , __snake_case ),)
_UpperCamelCase = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_UpperCamelCase = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_UpperCamelCase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __snake_case , stacklevel=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0:
_UpperCamelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCamelCase = call_frame.filename
_UpperCamelCase = call_frame.lineno
_UpperCamelCase = call_frame.function
_UpperCamelCase , _UpperCamelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__snake_case ) == 0:
return
elif len(__snake_case ) == 1:
return values[0]
return values
| 10 | 1 |
def _snake_case ( __snake_case , __snake_case ):
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 10 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case , __snake_case ):
return (preds == labels).mean()
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(__snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
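    # Metric: simple accuracy of the argmax predictions against the labels.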
def compute_metrics(__snake_case ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__snake_case , p.label_ids )}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__snake_case )
return results
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 10 | 1 |
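# Disjoint-set (union-find) structure with union by rank, tracking the size of the largest merged set.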
class lowerCAmelCase_ :
def __init__( self : Optional[Any] , _A : list ):
_UpperCamelCase = set_counts
_UpperCamelCase = max(_A )
_UpperCamelCase = len(_A )
_UpperCamelCase = [1] * num_sets
_UpperCamelCase = list(range(_A ) )
def UpperCamelCase_ ( self : int , _A : int , _A : int ):
_UpperCamelCase = self.get_parent(_A )
_UpperCamelCase = self.get_parent(_A )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
_UpperCamelCase = 0
_UpperCamelCase = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
_UpperCamelCase = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
_UpperCamelCase = 0
_UpperCamelCase = src_parent
_UpperCamelCase = self.set_counts[src_parent]
_UpperCamelCase = max(self.max_set , _A )
return True
def UpperCamelCase_ ( self : Optional[int] , _A : int ):
if self.parents[disj_set] == disj_set:
return disj_set
_UpperCamelCase = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 10 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
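# Configuration for the TrOCR text decoder; attribute_map aliases the generic config names onto the decoder-specific ones.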
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=5_0265 , _A : Optional[Any]=1024 , _A : Optional[Any]=12 , _A : Any=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=512 , _A : Dict=0.1 , _A : List[str]=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=2 , _A : Any=0.02 , _A : List[str]=0.0 , _A : List[str]=True , _A : str=False , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : int=0 , _A : Any=2 , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = activation_function
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_learned_position_embeddings
_UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
| 10 | 1 |
import os
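# Project Euler 82: minimal path sum from the left column to the right column of the matrix, moving up, down, and right.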
def _snake_case ( __snake_case = "input.txt" ):
with open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) as input_file:
_UpperCamelCase = [
[int(__snake_case ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(matrix[0] )
_UpperCamelCase = [[-1 for _ in range(__snake_case )] for _ in range(__snake_case )]
for i in range(__snake_case ):
_UpperCamelCase = matrix[i][0]
for j in range(1 , __snake_case ):
for i in range(__snake_case ):
_UpperCamelCase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __snake_case ):
_UpperCamelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
_UpperCamelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
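# Builds small random Flaubert configs and inputs, and checks the output shapes of every task head.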
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[int]=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
| 10 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] ):
# test for the above condition
self.test()
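    # Sanity-check the constraint: advance it step by step via update() and verify it can be fulfilled.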
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = 0
_UpperCamelCase = False
while not completed:
if counter == 1:
self.reset()
_UpperCamelCase = self.advance()
if not self.does_advance(_A ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.update(_A )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def UpperCamelCase_ ( self : Union[str, Any] ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : Any , _A : int ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : Optional[Any] , _A : int ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : Optional[Any] ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : str ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def UpperCamelCase_ ( self : Optional[int] , _A : str=False ):
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Tuple , _A : List[int] ):
super(_A , self ).__init__()
if not isinstance(_A , _A ) or len(_A ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
_UpperCamelCase = token_ids
_UpperCamelCase = len(self.token_ids )
_UpperCamelCase = -1 # the index of the currently fulfilled step
_UpperCamelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self : int , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_A )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCamelCase_ ( self : Optional[Any] , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_A )}""" )
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
if self.does_advance(_A ):
self.fulfilled_idx += 1
_UpperCamelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
_UpperCamelCase = True
_UpperCamelCase = completed
else:
# failed to make progress.
_UpperCamelCase = True
self.reset()
return stepped, completed, reset
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = False
_UpperCamelCase = 0
def UpperCamelCase_ ( self : str ):
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCamelCase_ ( self : Any , _A : Optional[int]=False ):
_UpperCamelCase = PhrasalConstraint(self.token_ids )
if stateful:
_UpperCamelCase = self.seqlen
_UpperCamelCase = self.fulfilled_idx
_UpperCamelCase = self.completed
return new_constraint
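# Trie over the candidate token-id sequences; next_tokens() lists the valid continuations of a given prefix.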
class lowerCAmelCase_ :
def __init__( self : Any , _A : List[List[int]] , _A : Optional[int]=True ):
_UpperCamelCase = max([len(_A ) for one in nested_token_ids] )
_UpperCamelCase = {}
for token_ids in nested_token_ids:
_UpperCamelCase = root
for tidx, token_id in enumerate(_A ):
if token_id not in level:
_UpperCamelCase = {}
_UpperCamelCase = level[token_id]
if no_subsets and self.has_subsets(_A , _A ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
_UpperCamelCase = root
def UpperCamelCase_ ( self : str , _A : Union[str, Any] ):
_UpperCamelCase = self.trie
for current_token in current_seq:
_UpperCamelCase = start[current_token]
_UpperCamelCase = list(start.keys() )
return next_tokens
def UpperCamelCase_ ( self : Any , _A : Optional[Any] ):
_UpperCamelCase = self.next_tokens(_A )
return len(_A ) == 0
def UpperCamelCase_ ( self : int , _A : List[str] ):
_UpperCamelCase = list(root.values() )
if len(_A ) == 0:
return 1
else:
return sum([self.count_leaves(_A ) for nn in next_nodes] )
def UpperCamelCase_ ( self : Optional[Any] , _A : Optional[Any] , _A : Any ):
_UpperCamelCase = self.count_leaves(_A )
return len(_A ) != leaf_count
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , _A : List[List[int]] ):
super(_A , self ).__init__()
if not isinstance(_A , _A ) or len(_A ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_A , _A ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
_UpperCamelCase = DisjunctiveTrie(_A )
_UpperCamelCase = nested_token_ids
_UpperCamelCase = self.trie.max_height
_UpperCamelCase = []
_UpperCamelCase = False
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.trie.next_tokens(self.current_seq )
if len(_A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self : Optional[int] , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}""" )
_UpperCamelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCamelCase_ ( self : Union[str, Any] , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}""" )
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
if self.does_advance(_A ):
self.current_seq.append(_A )
_UpperCamelCase = True
else:
_UpperCamelCase = True
self.reset()
_UpperCamelCase = self.trie.reached_leaf(self.current_seq )
_UpperCamelCase = completed
return stepped, completed, reset
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = False
_UpperCamelCase = []
def UpperCamelCase_ ( self : int ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCamelCase_ ( self : List[str] , _A : int=False ):
_UpperCamelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
_UpperCamelCase = self.seqlen
_UpperCamelCase = self.current_seq
_UpperCamelCase = self.completed
return new_constraint
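# Bookkeeping for a list of constraints during generation, split into completed, in-progress, and pending.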
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : List[Constraint] ):
_UpperCamelCase = constraints
# max # of steps required to fulfill a given constraint
_UpperCamelCase = max([c.seqlen for c in constraints] )
_UpperCamelCase = len(_A )
_UpperCamelCase = False
self.init_state()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = []
_UpperCamelCase = None
_UpperCamelCase = [constraint.copy(stateful=_A ) for constraint in self.constraints]
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_UpperCamelCase = constraint.advance()
if isinstance(_A , _A ):
token_list.append(_A )
elif isinstance(_A , _A ):
token_list.extend(_A )
else:
_UpperCamelCase = self.inprogress_constraint.advance()
if isinstance(_A , _A ):
token_list.append(_A )
elif isinstance(_A , _A ):
token_list.extend(_A )
if len(_A ) == 0:
return None
else:
return token_list
def UpperCamelCase_ ( self : Optional[int] , _A : Optional[List[int]] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_UpperCamelCase , _UpperCamelCase = self.add(_A )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCamelCase_ ( self : Union[str, Any] , _A : int ):
if not isinstance(_A , _A ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
_UpperCamelCase , _UpperCamelCase = False, False
if self.completed:
_UpperCamelCase = True
_UpperCamelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.inprogress_constraint.update(_A )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_A ) )
_UpperCamelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_UpperCamelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
_UpperCamelCase = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any
            # constraint in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_A ):
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = pending_constraint.update(_A )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_A )
_UpperCamelCase = None
if not complete and stepped:
_UpperCamelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_UpperCamelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_UpperCamelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCamelCase_ ( self : str , _A : Optional[Any]=True ):
        _UpperCamelCase = ConstraintListState(self.constraints )  # we never actually modify the self.constraints
        # objects throughout this process, so they are still in their initialization state.
if stateful:
_UpperCamelCase = [
constraint.copy(stateful=_A ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_UpperCamelCase = self.inprogress_constraint.copy(stateful=_A )
_UpperCamelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 10 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
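# Builds a small random BlipText config and inputs for the TF model checks below.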
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 | 1 |
def _snake_case ( ):
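    # Project Euler 9: exactly one Pythagorean triplet (a, b, c) satisfies
    # a + b + c == 1000; the comprehension below enumerates (a, b), derives
    # c = 1000 - a - b, and returns the product a * b * c.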
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(__snake_case , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | from __future__ import annotations
_lowerCAmelCase = [True] * 1_000_001
_lowerCAmelCase = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
_lowerCAmelCase = False
i += 1
def _snake_case ( __snake_case ):
return seive[n]
def _snake_case ( __snake_case ):
return any(digit in '''02468''' for digit in str(__snake_case ) )
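# Any prime other than 2 that contains an even digit cannot be circular: some
# rotation ends in that digit, making the rotated number even and composite.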
def _snake_case ( __snake_case = 1000000 ):
_UpperCamelCase = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__snake_case ) and not contains_an_even_digit(__snake_case ):
_UpperCamelCase = str(__snake_case )
_UpperCamelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(__snake_case ) )]
if all(is_prime(__snake_case ) for i in list_nums ):
result.append(__snake_case )
return result
def _snake_case ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
| 10 | 1 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = CpmAntTokenizer
UpperCAmelCase = False
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
_UpperCamelCase = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
_UpperCamelCase = '''今天天气真好!'''
_UpperCamelCase = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = '''今天天气真好!'''
_UpperCamelCase = [tokenizer.bos_token] + tokens
_UpperCamelCase = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
_UpperCamelCase = tokenizer.decode(_A )
self.assertEqual(_A , _A )
| 10 | import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = DebertaVaTokenizer(_A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 10 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_lowerCAmelCase = (720, 1_280) # Height, Width
_lowerCAmelCase = (0.4, 0.6) # Drop a box if its height or width falls below this scale.
_lowerCAmelCase = 1 / 100
_lowerCAmelCase = ""
_lowerCAmelCase = ""
_lowerCAmelCase = ""
_lowerCAmelCase = 250
def _snake_case ( ):
_UpperCamelCase , _UpperCamelCase = get_dataset(__snake_case , __snake_case )
for index in range(__snake_case ):
_UpperCamelCase = random.sample(range(len(__snake_case ) ) , 4 )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = update_image_and_anno(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , filter_scale=__snake_case , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_UpperCamelCase = random_chars(32 )
_UpperCamelCase = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
_UpperCamelCase = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , __snake_case , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
_UpperCamelCase = []
for anno in new_annos:
_UpperCamelCase = anno[3] - anno[1]
_UpperCamelCase = anno[4] - anno[2]
_UpperCamelCase = anno[1] + width / 2
_UpperCamelCase = anno[2] + height / 2
_UpperCamelCase = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(__snake_case )
with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
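# The annotation lines written above appear to follow the YOLO label format:
# "class x_center y_center width height", with coordinates normalized to [0, 1].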
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = []
_UpperCamelCase = []
for label_file in glob.glob(os.path.join(__snake_case , '''*.txt''' ) ):
_UpperCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(__snake_case ) as in_file:
_UpperCamelCase = in_file.readlines()
_UpperCamelCase = os.path.join(__snake_case , f"""{label_name}.jpg""" )
_UpperCamelCase = []
for obj_list in obj_lists:
_UpperCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
_UpperCamelCase = float(obj[1] ) - float(obj[3] ) / 2
_UpperCamelCase = float(obj[2] ) - float(obj[4] ) / 2
_UpperCamelCase = float(obj[1] ) + float(obj[3] ) / 2
_UpperCamelCase = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__snake_case )
labels.append(__snake_case )
return img_paths, labels
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = 0.0 , ):
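    # Build a 2x2 mosaic: a random split point (divid_point_x, divid_point_y)
    # partitions the canvas, one source image is resized into each quadrant,
    # and every bounding box is remapped into the mosaic's normalized coordinates.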
_UpperCamelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
_UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
_UpperCamelCase = int(scale_x * output_size[1] )
_UpperCamelCase = int(scale_y * output_size[0] )
_UpperCamelCase = []
_UpperCamelCase = []
for i, index in enumerate(__snake_case ):
_UpperCamelCase = all_img_list[index]
path_list.append(__snake_case )
_UpperCamelCase = all_annos[index]
_UpperCamelCase = cva.imread(__snake_case )
if i == 0: # top-left
_UpperCamelCase = cva.resize(__snake_case , (divid_point_x, divid_point_y) )
_UpperCamelCase = img
for bbox in img_annos:
_UpperCamelCase = bbox[1] * scale_x
_UpperCamelCase = bbox[2] * scale_y
_UpperCamelCase = bbox[3] * scale_x
_UpperCamelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
_UpperCamelCase = cva.resize(__snake_case , (output_size[1] - divid_point_x, divid_point_y) )
_UpperCamelCase = img
for bbox in img_annos:
_UpperCamelCase = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase = bbox[2] * scale_y
_UpperCamelCase = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
_UpperCamelCase = cva.resize(__snake_case , (divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase = img
for bbox in img_annos:
_UpperCamelCase = bbox[1] * scale_x
_UpperCamelCase = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase = bbox[3] * scale_x
_UpperCamelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
_UpperCamelCase = cva.resize(
__snake_case , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
_UpperCamelCase = img
for bbox in img_annos:
_UpperCamelCase = scale_x + bbox[1] * (1 - scale_x)
_UpperCamelCase = scale_y + bbox[2] * (1 - scale_y)
_UpperCamelCase = scale_x + bbox[3] * (1 - scale_x)
_UpperCamelCase = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
_UpperCamelCase = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _snake_case ( __snake_case ):
    assert number_char > 1, "The number of characters should be greater than 1"
_UpperCamelCase = ascii_lowercase + digits
return "".join(random.choice(__snake_case ) for _ in range(__snake_case ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 10 | import sys
from collections import defaultdict
class lowerCAmelCase_ :
def __init__( self : Optional[int] ):
_UpperCamelCase = []
def UpperCamelCase_ ( self : Any , _A : str ):
return self.node_position[vertex]
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ):
_UpperCamelCase = pos
def UpperCamelCase_ ( self : Any , _A : List[str] , _A : int , _A : Optional[Any] , _A : Union[str, Any] ):
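        # Sift-down: push heap[start] toward the leaves, swapping with the smaller
        # child until the min-heap property holds; `positions` tracks vertex slots.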
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCamelCase = 2 * start + 1
else:
_UpperCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child]
_UpperCamelCase , _UpperCamelCase = (
heap[start],
positions[start],
)
_UpperCamelCase , _UpperCamelCase = temp, tempa
_UpperCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _A )
self.top_to_bottom(_A , _A , _A , _A )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Optional[int] ):
_UpperCamelCase = position[index]
while index != 0:
_UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_UpperCamelCase = heap[parent]
_UpperCamelCase = position[parent]
self.set_position(position[parent] , _A )
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , _A )
break
_UpperCamelCase = parent
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , 0 )
def UpperCamelCase_ ( self : int , _A : Tuple , _A : int ):
_UpperCamelCase = len(_A ) // 2 - 1
for i in range(_A , -1 , -1 ):
self.top_to_bottom(_A , _A , len(_A ) , _A )
def UpperCamelCase_ ( self : Any , _A : int , _A : List[str] ):
_UpperCamelCase = positions[0]
_UpperCamelCase = sys.maxsize
self.top_to_bottom(_A , 0 , len(_A ) , _A )
return temp
def _snake_case ( __snake_case ):
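    # Prim's algorithm: grow a minimum spanning tree from vertex 0, using the
    # Heap above to repeatedly extract the unvisited vertex with the cheapest
    # connecting edge and relax the distances of its neighbors.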
_UpperCamelCase = Heap()
_UpperCamelCase = [0] * len(__snake_case )
    _UpperCamelCase = [-1] * len(__snake_case ) # neighboring tree vertex of each selected vertex
    # minimum distance from each explored vertex to its neighboring vertex in the
    # partial tree formed so far
_UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCamelCase = []
for vertex in range(len(__snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(__snake_case )
heap.node_position.append(__snake_case )
_UpperCamelCase = []
_UpperCamelCase = 1
_UpperCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCamelCase = 0
_UpperCamelCase = distance
heap.heapify(__snake_case , __snake_case )
for _ in range(1 , len(__snake_case ) ):
_UpperCamelCase = heap.delete_minimum(__snake_case , __snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__snake_case )]
):
_UpperCamelCase = distance
heap.bottom_to_top(
__snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
_UpperCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
_lowerCAmelCase = int(input("Enter number of edges: ").strip())
_lowerCAmelCase = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 10 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCAmelCase = HfApi()
_lowerCAmelCase = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 10 | import logging
import os
from .state import PartialState
class lowerCAmelCase_ ( logging.LoggerAdapter ):
@staticmethod
def UpperCamelCase_ ( _A : Any ):
_UpperCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : str , *_A : int , **_A : List[Any] ):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
_UpperCamelCase = kwargs.pop('''main_process_only''' , _A )
_UpperCamelCase = kwargs.pop('''in_order''' , _A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
elif in_order:
_UpperCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCamelCase , _UpperCamelCase = self.process(_A , _A )
self.logger.log(_A , _A , *_A , **_A )
state.wait_for_everyone()
def _snake_case ( __snake_case , __snake_case = None ):
if log_level is None:
_UpperCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , __snake_case )
_UpperCamelCase = logging.getLogger(__snake_case )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__snake_case , {} )
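# Usage sketch (the factory above is named `get_logger` in the original
# accelerate source; an Accelerator/PartialState must be initialized first):
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("logged on the main process only")
#   logger.info("logged on every process, in order", main_process_only=False, in_order=True)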
| 10 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 10 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = BertGenerationTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''<s>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''Hello World!'''
_UpperCamelCase = [1_8536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCamelCase = ''' '''.join(_A )
_UpperCamelCase = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = BertGenerationConfig()
_UpperCamelCase = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 10 | 1 |
import os
from math import logaa
def _snake_case ( __snake_case = "base_exp.txt" ):
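    # Project Euler 99: compare pairs base**exp without evaluating them, using
    # the monotonic transform exp * log10(base); the 1-based line number of the
    # running maximum is the answer.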
_UpperCamelCase = 0
_UpperCamelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) ):
_UpperCamelCase , _UpperCamelCase = list(map(__snake_case , line.split(''',''' ) ) )
if x * logaa(__snake_case ) > largest:
_UpperCamelCase = x * logaa(__snake_case )
_UpperCamelCase = i + 1
return result
if __name__ == "__main__":
print(solution())
| 10 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
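        # These components mirror unCLIP's two-stage design: the prior maps text
        # embeddings to image embeddings, the image normalizer / noising scheduler
        # prepares them for conditioning, and the UNet + VAE handle latent
        # denoising and decoding.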
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase = pipe('''anime turle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _snake_case ( __snake_case , __snake_case=0.999 , __snake_case="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__snake_case ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__snake_case ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
_UpperCamelCase = []
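    # Discretize beta_t = 1 - alpha_bar(t2) / alpha_bar(t1) over consecutive
    # timestep fractions, clipping each value at max_beta (0.999 by default),
    # following the cosine schedule of Nichol & Dhariwal (2021).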
for i in range(__snake_case ):
_UpperCamelCase = i / num_diffusion_timesteps
_UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
return torch.tensor(__snake_case , dtype=torch.floataa )
class lowerCAmelCase_ ( __lowercase, __lowercase ):
UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase = 2
@register_to_config
def __init__( self : Optional[Any] , _A : int = 1000 , _A : float = 0.0_0085 , _A : float = 0.012 , _A : str = "linear" , _A : Optional[Union[np.ndarray, List[float]]] = None , _A : str = "epsilon" , _A : str = "linspace" , _A : int = 0 , ):
if trained_betas is not None:
_UpperCamelCase = torch.tensor(_A , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCamelCase = torch.linspace(_A , _A , _A , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _A , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCamelCase = betas_for_alpha_bar(_A )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
_UpperCamelCase = 1.0 - self.betas
_UpperCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_A , _A , _A )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : List[str]=None ):
if schedule_timesteps is None:
_UpperCamelCase = self.timesteps
_UpperCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_UpperCamelCase = 1 if len(_A ) > 1 else 0
else:
_UpperCamelCase = timestep.cpu().item() if torch.is_tensor(_A ) else timestep
_UpperCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase_ ( self : str ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase_ ( self : str , _A : torch.FloatTensor , _A : Union[float, torch.FloatTensor] , ):
_UpperCamelCase = self.index_for_timestep(_A )
if self.state_in_first_order:
_UpperCamelCase = self.sigmas[step_index]
else:
_UpperCamelCase = self.sigmas_interpol[step_index]
_UpperCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase_ ( self : Optional[Any] , _A : int , _A : Union[str, torch.device] = None , _A : Optional[int] = None , ):
_UpperCamelCase = num_inference_steps
_UpperCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_UpperCamelCase = np.linspace(0 , num_train_timesteps - 1 , _A , dtype=_A )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_UpperCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCamelCase = (np.arange(0 , _A ) * step_ratio).round()[::-1].copy().astype(_A )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_UpperCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCamelCase = (np.arange(_A , 0 , -step_ratio )).round().copy().astype(_A )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_UpperCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_UpperCamelCase = torch.from_numpy(np.log(_A ) ).to(_A )
_UpperCamelCase = np.interp(_A , np.arange(0 , len(_A ) ) , _A )
_UpperCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_UpperCamelCase = torch.from_numpy(_A ).to(device=_A )
# interpolate sigmas
_UpperCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_UpperCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_UpperCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_A ).startswith('''mps''' ):
# mps does not support float64
_UpperCamelCase = torch.from_numpy(_A ).to(_A , dtype=torch.floataa )
else:
_UpperCamelCase = torch.from_numpy(_A ).to(_A )
# interpolate timesteps
_UpperCamelCase = self.sigma_to_t(_A ).to(_A , dtype=timesteps.dtype )
_UpperCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_UpperCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
_UpperCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_UpperCamelCase = defaultdict(_A )
def UpperCamelCase_ ( self : Any , _A : List[Any] ):
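        # sigma_to_t: locate log(sigma) between the two nearest precomputed
        # log_sigmas and linearly interpolate their indices into a fractional timestep.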
# get log sigma
_UpperCamelCase = sigma.log()
# get distribution
_UpperCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_UpperCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_UpperCamelCase = low_idx + 1
_UpperCamelCase = self.log_sigmas[low_idx]
_UpperCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
_UpperCamelCase = (low - log_sigma) / (low - high)
_UpperCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
_UpperCamelCase = (1 - w) * low_idx + w * high_idx
_UpperCamelCase = t.view(sigma.shape )
return t
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return self.sample is None
def UpperCamelCase_ ( self : Optional[int] , _A : Union[torch.FloatTensor, np.ndarray] , _A : Union[float, torch.FloatTensor] , _A : Union[torch.FloatTensor, np.ndarray] , _A : bool = True , ):
_UpperCamelCase = self.index_for_timestep(_A )
# advance index counter by 1
_UpperCamelCase = timestep.cpu().item() if torch.is_tensor(_A ) else timestep
self._index_counter[timestep_int] += 1
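        # KDPM2 takes two model evaluations per output step: the first-order pass
        # below performs an Euler step to the interpolated sigma and caches `sample`;
        # the second pass completes the DPM-Solver-2 (midpoint) update and clears the cache.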
if self.state_in_first_order:
_UpperCamelCase = self.sigmas[step_index]
_UpperCamelCase = self.sigmas_interpol[step_index + 1]
_UpperCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_UpperCamelCase = self.sigmas[step_index - 1]
_UpperCamelCase = self.sigmas_interpol[step_index]
_UpperCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_UpperCamelCase = 0
_UpperCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_UpperCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
_UpperCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_UpperCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
_UpperCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_UpperCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_UpperCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
_UpperCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_UpperCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_UpperCamelCase = sigma_next - sigma_hat
_UpperCamelCase = self.sample
_UpperCamelCase = None
_UpperCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_A )
def UpperCamelCase_ ( self : List[Any] , _A : torch.FloatTensor , _A : torch.FloatTensor , _A : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_UpperCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_A ):
# mps does not support float64
_UpperCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_UpperCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_UpperCamelCase = self.timesteps.to(original_samples.device )
_UpperCamelCase = timesteps.to(original_samples.device )
_UpperCamelCase = [self.index_for_timestep(_A , _A ) for t in timesteps]
_UpperCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_UpperCamelCase = sigma.unsqueeze(-1 )
_UpperCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self : List[str] ):
return self.config.num_train_timesteps
| 10 | from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case ( __snake_case , __snake_case ):
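    # Euclidean (L2) distance between two equal-length vectors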
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__snake_case , __snake_case ) ) )
def _snake_case ( __snake_case , __snake_case ):
if dataset.ndim != value_array.ndim:
_UpperCamelCase = (
'''Wrong input data\'s dimensions... '''
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__snake_case )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCamelCase = (
'''Wrong input data\'s shape... '''
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__snake_case )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
_UpperCamelCase = (
            '''Input data have different datatypes... '''
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__snake_case )
_UpperCamelCase = []
for value in value_array:
_UpperCamelCase = euclidean(__snake_case , dataset[0] )
_UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCamelCase = euclidean(__snake_case , __snake_case )
if dist > temp_dist:
_UpperCamelCase = temp_dist
_UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _snake_case ( __snake_case , __snake_case ):
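    # cosine similarity: dot product of the vectors divided by the product of their norms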
return np.dot(__snake_case , __snake_case ) / (norm(__snake_case ) * norm(__snake_case ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "codegen"
UpperCAmelCase = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , _A : Optional[Any]=5_0400 , _A : str=2048 , _A : Tuple=2048 , _A : List[Any]=4096 , _A : Optional[Any]=28 , _A : Union[str, Any]=16 , _A : List[str]=64 , _A : Union[str, Any]=None , _A : Any="gelu_new" , _A : List[str]=0.0 , _A : str=0.0 , _A : Any=0.0 , _A : Tuple=1e-5 , _A : Dict=0.02 , _A : int=True , _A : List[str]=5_0256 , _A : Optional[int]=5_0256 , _A : Dict=False , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = n_ctx
_UpperCamelCase = n_positions
_UpperCamelCase = n_embd
_UpperCamelCase = n_layer
_UpperCamelCase = n_head
_UpperCamelCase = n_inner
_UpperCamelCase = rotary_dim
_UpperCamelCase = activation_function
_UpperCamelCase = resid_pdrop
_UpperCamelCase = embd_pdrop
_UpperCamelCase = attn_pdrop
_UpperCamelCase = layer_norm_epsilon
_UpperCamelCase = initializer_range
_UpperCamelCase = use_cache
_UpperCamelCase = bos_token_id
_UpperCamelCase = eos_token_id
super().__init__(
bos_token_id=_A , eos_token_id=_A , tie_word_embeddings=_A , **_A )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , _A : PretrainedConfig , _A : str = "default" , _A : List[PatchingSpec] = None , _A : bool = False , ):
super().__init__(_A , task=_A , patching_specs=_A , use_past=_A )
if not getattr(self._config , '''pad_token_id''' , _A ):
# TODO: how to do that better?
_UpperCamelCase = 0
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_A , direction='''inputs''' )
_UpperCamelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def UpperCamelCase_ ( self : Tuple ):
return self._config.n_layer
@property
def UpperCamelCase_ ( self : List[Any] ):
return self._config.n_head
def UpperCamelCase_ ( self : str , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ):
_UpperCamelCase = super(_A , self ).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
        # We need to order the inputs in the way they appear in the forward()
_UpperCamelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_UpperCamelCase , _UpperCamelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_UpperCamelCase = seqlen + 2
_UpperCamelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
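            # one (key, value) pair of zeros per decoder layer serves as the dummy past state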
_UpperCamelCase = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
_UpperCamelCase = common_inputs['''attention_mask''']
if self.use_past:
_UpperCamelCase = ordered_inputs['''attention_mask'''].dtype
_UpperCamelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_A , _A , dtype=_A )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase_ ( self : int ):
return 13
| 10 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
        # NOTE: Larger batch sizes cause this test to time out; only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 10 | 1 |
_lowerCAmelCase = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
_lowerCAmelCase = {value: key for key, value in encode_dict.items()}
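# decoding inverts the table: every 5-character group of "A"/"B" maps back to one letter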
def _snake_case ( __snake_case ):
_UpperCamelCase = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def _snake_case ( __snake_case ):
if set(__snake_case ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
_UpperCamelCase = ''''''
for word in coded.split():
while len(__snake_case ) != 0:
decoded += decode_dict[word[:5]]
_UpperCamelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCAmelCase = HfApi()
_lowerCAmelCase = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
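# each tensor above is the expected slice of the first 30 output logits for one checkpoint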
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 10 | 1 |
from __future__ import annotations
import math
_lowerCAmelCase = "2020.9.26"
_lowerCAmelCase = "xcodz-dot, cclaus, dhruvmanila"
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
if not all(isinstance(__snake_case , (float, int) ) for val in locals().values() ):
_UpperCamelCase = f"""Input values must either be float or int: {list(locals().values() )}"""
raise TypeError(__snake_case )
_UpperCamelCase = ((x * distance) / (z + distance)) * scale
_UpperCamelCase = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
if not isinstance(__snake_case , __snake_case ):
raise TypeError('''Axis must be a str''' )
_UpperCamelCase = locals()
del input_variables["axis"]
if not all(isinstance(__snake_case , (float, int) ) for val in input_variables.values() ):
_UpperCamelCase = (
'''Input values except axis must either be float or int: '''
f"""{list(input_variables.values() )}"""
)
raise TypeError(__snake_case )
_UpperCamelCase = (angle % 360) / 450 * 180 / math.pi
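    # note: degrees are converted with a 450 divisor instead of the usual 360, so the
    # rotation is only an approximation; the example outputs printed under __main__
    # assume this convention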
if axis == "z":
_UpperCamelCase = x * math.cos(__snake_case ) - y * math.sin(__snake_case )
_UpperCamelCase = y * math.cos(__snake_case ) + x * math.sin(__snake_case )
_UpperCamelCase = z
elif axis == "x":
_UpperCamelCase = y * math.cos(__snake_case ) - z * math.sin(__snake_case )
_UpperCamelCase = z * math.cos(__snake_case ) + y * math.sin(__snake_case )
_UpperCamelCase = x
elif axis == "y":
_UpperCamelCase = x * math.cos(__snake_case ) - z * math.sin(__snake_case )
_UpperCamelCase = z * math.cos(__snake_case ) + x * math.sin(__snake_case )
_UpperCamelCase = y
else:
raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }')
print(f'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
| 10 | from typing import List
from .keymap import KEYMAP, get_character
def _snake_case ( __snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += [key]
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
def _snake_case ( *__snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += keys
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
class lowerCAmelCase_ ( __lowercase ):
def __new__( cls : Optional[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] ):
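        # collect every method tagged by the decorators above into a key -> handler map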
_UpperCamelCase = super().__new__(cls , _A , _A , _A )
if not hasattr(_A , '''key_handler''' ):
setattr(_A , '''key_handler''' , {} )
setattr(_A , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCamelCase = getattr(_A , '''handle_key''' , [] )
for key in handled_keys:
_UpperCamelCase = value
return new_cls
@staticmethod
def UpperCamelCase_ ( cls : str ):
_UpperCamelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCamelCase = ord(_A )
_UpperCamelCase = cls.key_handler.get(_A )
if handler:
_UpperCamelCase = char
return handler(cls )
else:
return None
def _snake_case ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 10 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=5_0265 , _A : Optional[Any]=1024 , _A : Optional[Any]=12 , _A : Any=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=512 , _A : Dict=0.1 , _A : List[str]=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=2 , _A : Any=0.02 , _A : List[str]=0.0 , _A : List[str]=True , _A : str=False , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : int=0 , _A : Any=2 , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = activation_function
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_learned_position_embeddings
_UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
| 10 | import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
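        # a pad token is assigned above because batched generation requires padding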
_UpperCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCamelCase_ ( self : int , _A : str , _A : Union[str, Any] , _A : Any ):
_UpperCamelCase = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_UpperCamelCase = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_UpperCamelCase = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any] ):
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_text=_A , return_tensors=_A )
        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus, which always appends EOS, so it
        # works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, so these tests are skipped for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_UpperCamelCase = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else)
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_UpperCamelCase = logging.get_logger('''transformers.generation.utils''' )
        _UpperCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
| 10 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "imagegpt"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , _A : Any=512 + 1 , _A : Tuple=32 * 32 , _A : Union[str, Any]=512 , _A : Union[str, Any]=24 , _A : Dict=8 , _A : List[str]=None , _A : List[Any]="quick_gelu" , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : int=0.1 , _A : int=1e-5 , _A : List[str]=0.02 , _A : Optional[Any]=True , _A : List[str]=True , _A : Any=False , _A : Optional[Any]=False , _A : List[str]=False , **_A : Any , ):
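        # the 512 + 1 default corresponds to ImageGPT's 512 colour-cluster tokens plus one
        # start-of-sequence token; 32 * 32 is one position per pixel of a 32x32 image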
_UpperCamelCase = vocab_size
_UpperCamelCase = n_positions
_UpperCamelCase = n_embd
_UpperCamelCase = n_layer
_UpperCamelCase = n_head
_UpperCamelCase = n_inner
_UpperCamelCase = activation_function
_UpperCamelCase = resid_pdrop
_UpperCamelCase = embd_pdrop
_UpperCamelCase = attn_pdrop
_UpperCamelCase = layer_norm_epsilon
_UpperCamelCase = initializer_range
_UpperCamelCase = scale_attn_weights
_UpperCamelCase = use_cache
_UpperCamelCase = scale_attn_by_inverse_layer_idx
_UpperCamelCase = reorder_and_upcast_attn
_UpperCamelCase = tie_word_embeddings
super().__init__(tie_word_embeddings=_A , **_A )
class lowerCAmelCase_ ( __lowercase ):
@property
def UpperCamelCase_ ( self : List[str] ):
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def UpperCamelCase_ ( self : Any , _A : "FeatureExtractionMixin" , _A : int = 1 , _A : int = -1 , _A : bool = False , _A : Optional["TensorType"] = None , _A : int = 3 , _A : int = 32 , _A : int = 32 , ):
_UpperCamelCase = self._generate_dummy_images(_A , _A , _A , _A )
_UpperCamelCase = dict(preprocessor(images=_A , return_tensors=_A ) )
return inputs
| 10 | def _snake_case ( __snake_case = 100 ):
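    # closed forms: (n(n + 1) / 2) ** 2 is the square of the sum of the first n
    # natural numbers, and n(n + 1)(2n + 1) / 6 is the sum of their squares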
_UpperCamelCase = (n * (n + 1) // 2) ** 2
_UpperCamelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
def _snake_case ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
_lowerCAmelCase = generate_large_matrix()
_lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _snake_case ( __snake_case ):
assert all(row == sorted(__snake_case , reverse=__snake_case ) for row in grid )
assert all(list(__snake_case ) == sorted(__snake_case , reverse=__snake_case ) for col in zip(*__snake_case ) )
def _snake_case ( __snake_case ):
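    # binary search for the index of the first negative value in a non-increasing array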
_UpperCamelCase = 0
_UpperCamelCase = len(__snake_case ) - 1
    # Edge cases: an empty array, or an array whose values are all negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
_UpperCamelCase = (left + right) // 2
_UpperCamelCase = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_UpperCamelCase = mid + 1
else:
_UpperCamelCase = mid - 1
    # No negative numbers, so return the last index of the array + 1, which is the length.
return len(__snake_case )
def _snake_case ( __snake_case ):
_UpperCamelCase = 0
_UpperCamelCase = len(grid[0] )
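    # columns are sorted in decreasing order too, so each row's first negative index
    # can only move left; searching only grid[i][:bound] exploits that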
for i in range(len(__snake_case ) ):
_UpperCamelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(__snake_case ) * len(grid[0] )) - total
def _snake_case ( __snake_case ):
return len([number for row in grid for number in row if number < 0] )
def _snake_case ( __snake_case ):
_UpperCamelCase = 0
for row in grid:
for i, number in enumerate(__snake_case ):
if number < 0:
total += len(__snake_case ) - i
break
return total
def _snake_case ( ):
from timeit import timeit
print('''Running benchmarks''' )
_UpperCamelCase = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_UpperCamelCase = timeit(f"""{func}(grid=grid)""" , setup=__snake_case , number=500 )
print(f"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 10 | import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
def constraint_to_multiple_of(__snake_case , __snake_case , __snake_case=0 , __snake_case=None ):
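        # round val to the nearest multiple of `multiple`, then floor/ceil so the
        # result respects the optional max_val / min_val bounds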
_UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
_UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
_UpperCamelCase = (output_size, output_size) if isinstance(__snake_case , __snake_case ) else output_size
_UpperCamelCase , _UpperCamelCase = get_image_size(__snake_case )
_UpperCamelCase , _UpperCamelCase = output_size
# determine new height and width
_UpperCamelCase = output_height / input_height
_UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_UpperCamelCase = scale_width
else:
# fit height
_UpperCamelCase = scale_height
_UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=__snake_case )
_UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=__snake_case )
return (new_height, new_width)
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 10 | 1 |
from string import ascii_lowercase, ascii_uppercase
def _snake_case ( __snake_case ):
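    # uppercase only the first character; the rest of the sentence is left untouched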
if not sentence:
return ""
_UpperCamelCase = dict(zip(__snake_case , __snake_case ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Optional[Any] , _A : List[str] , _A : Optional[int]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 10 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
        # NOTE: Larger batch sizes cause this test to time out; only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 10 | import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class ModelArguments :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class DataTrainingArguments :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class DataCollatorCTCWithPadding :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
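    # the untyped fields above stand in (in the original collator) for:
    # processor, padding, max_length, max_length_labels,
    # pad_to_multiple_of and pad_to_multiple_of_labels, exactly the names
    # referenced in __call__ below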
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
_UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
_UpperCamelCase = self.processor.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.processor.pad(
labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
_UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_UpperCamelCase = labels
return batch
class CTCTrainer( Trainer ):
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
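        # a standard training step: forward pass (optionally under AMP
        # autocast), scale the CTC loss for multi-GPU and gradient
        # accumulation, then backpropagate via the AMP scaler, Apex, or
        # DeepSpeed path below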
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
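    # build a regex character class such as "[,?.!;:]" from the characters to
    # ignore, then strip it from every transcript and lowercase the result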
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
    def extract_all_chars(batch ):
        all_text = ''' '''.join(batch['''text'''] )
        vocab = list(set(all_text ) )
        return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
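    # in the original script the next four statements re-key the space
    # character to the word-delimiter token "|" and then append "[UNK]" and
    # "[PAD]" at the next free indices before the vocab is written out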
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=-1 )
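        # in the original script the next line writes the pad token id back
        # into `pred.label_ids` wherever -100 masked the CTC loss, so the
        # labels can be decoded back to text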
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
| 10 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (PNDMScheduler,)
UpperCAmelCase = (("num_inference_steps", 50),)
def UpperCamelCase_ ( self : str , **_A : Dict ):
_UpperCamelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : int , _A : Tuple=0 , **_A : Optional[int] ):
_UpperCamelCase = dict(self.forward_default_kwargs )
_UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A )
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
_UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.get_scheduler_config(**_A )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_UpperCamelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_UpperCamelCase = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_UpperCamelCase = dummy_past_residuals[:]
_UpperCamelCase = scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
_UpperCamelCase = new_scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCamelCase = scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
_UpperCamelCase = new_scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : List[str] , _A : str=0 , **_A : str ):
_UpperCamelCase = dict(self.forward_default_kwargs )
_UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A )
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
_UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCamelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_UpperCamelCase = scheduler_class.from_pretrained(_A )
                # set the timesteps on the freshly loaded scheduler
                new_scheduler.set_timesteps(_A )
                # copy over dummy past residuals (must be after setting timesteps)
_UpperCamelCase = dummy_past_residuals[:]
_UpperCamelCase = scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
_UpperCamelCase = new_scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCamelCase = scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
_UpperCamelCase = new_scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : str , **_A : Any ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(**_A )
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase = 10
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(_A )
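        # PNDM runs the Runge-Kutta (PRK) warm-up steps first, then switches
        # to the linear multistep (PLMS) steps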
for i, t in enumerate(scheduler.prk_timesteps ):
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step_prk(_A , _A , _A ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_UpperCamelCase = model(_A , _A )
_UpperCamelCase = scheduler.step_plms(_A , _A , _A ).prev_sample
return sample
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = dict(self.forward_default_kwargs )
_UpperCamelCase = kwargs.pop('''num_inference_steps''' , _A )
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_A , '''set_timesteps''' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A , '''set_timesteps''' ):
_UpperCamelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCamelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_UpperCamelCase = dummy_past_residuals[:]
_UpperCamelCase = scheduler.step_prk(_A , 0 , _A , **_A ).prev_sample
_UpperCamelCase = scheduler.step_prk(_A , 1 , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_UpperCamelCase = scheduler.step_plms(_A , 0 , _A , **_A ).prev_sample
_UpperCamelCase = scheduler.step_plms(_A , 1 , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : Any ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : Tuple ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_A )
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(steps_offset=1 )
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase_ ( self : List[str] ):
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def UpperCamelCase_ ( self : str ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def UpperCamelCase_ ( self : Dict ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=_A )
def UpperCamelCase_ ( self : Tuple ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_A )
def UpperCamelCase_ ( self : Dict ):
        # an earlier version of set_timesteps() raised an indexing error into the alphas when num_inference_steps was a power of 3
_UpperCamelCase = 27
for scheduler_class in self.scheduler_classes:
_UpperCamelCase = self.dummy_sample
_UpperCamelCase = 0.1 * sample
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(_A )
            # before the power-of-3 fix this errored on the first step, so two steps are enough to cover the regression
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_UpperCamelCase = scheduler.step_prk(_A , _A , _A ).prev_sample
def UpperCamelCase_ ( self : Union[str, Any] ):
with self.assertRaises(_A ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.full_loop()
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.full_loop(prediction_type='''v_prediction''' )
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def UpperCamelCase_ ( self : str ):
# We specify different beta, so that the first alpha is 0.99
_UpperCamelCase = self.full_loop(set_alpha_to_one=_A , beta_start=0.01 )
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def UpperCamelCase_ ( self : Any ):
# We specify different beta, so that the first alpha is 0.99
_UpperCamelCase = self.full_loop(set_alpha_to_one=_A , beta_start=0.01 )
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
| 10 | import math
class Graph :
    def __init__( self , n : int = 0 ):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u : int , v : int , w : int ):
        self.dp[u][v] = w
    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u : int , v : int ):
        return self.dp[u][v]
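# note: dp[i][i] is never initialised to 0, so show_min(i, i) reports the
# cheapest cycle through i (or math.inf if there is none) rather than 0; the
# classic formulation sets the diagonal to 0 first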
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))  # shortest 1 -> 4 distance: 11
    print(graph.show_min(0, 3))  # shortest 0 -> 3 distance: 16
| 10 | 1 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_lowerCAmelCase = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
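# at runtime (the `else` branch below) this module is replaced by a
# _LazyModule, so a submodule listed above is only imported when one of its
# attributes is first accessed; the TYPE_CHECKING branch gives static type
# checkers real imports instead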
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(
default=[], metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
}, )
UpperCAmelCase = list_field(
default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase = list_field(
default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Benchmark training of model"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Trace memory line by line"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to print environment information"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
}, )
UpperCAmelCase = field(
default=F"""inference_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv."}, )
UpperCAmelCase = field(
default=F"""inference_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv."}, )
UpperCAmelCase = field(
default=F"""train_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
UpperCAmelCase = field(
default=F"""train_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
UpperCAmelCase = field(
default=F"""env_info_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving environment information."}, )
UpperCAmelCase = field(
default=F"""log_{round(time() )}.csv""", metadata={"help": "Log filename used if print statements are saved in log."}, )
UpperCAmelCase = field(default=3, metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
}, )
def UpperCamelCase_ ( self : Union[str, Any] ):
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            '''to benchmark Transformer models.''' , _A , )
def UpperCamelCase_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCamelCase_ ( self : Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
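# a minimal usage sketch, hedged: `PyTorchBenchmarkArguments` and
# `PyTorchBenchmark` are the upstream transformers classes that this
# obfuscated dataclass mirrors, not names defined in this file
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   benchmark = PyTorchBenchmark(args)
#   results = benchmark.run()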
| 10 | 1 |
def binary_or( a : int , b : int ):
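    # doctest examples added so the ``doctest.testmod()`` call below actually
    # exercises this function; expected values verified by hand
    """Return the bitwise OR of two non-negative integers as a binary string.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(0, 0)
    '0b0'
    """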
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *__snake_case , __snake_case = None , __snake_case=True , __snake_case=2 ):
from .. import __version__
_UpperCamelCase = take_from
_UpperCamelCase = ()
if not isinstance(args[0] , __snake_case ):
_UpperCamelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__snake_case ).base_version ) >= version.parse(__snake_case ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
_UpperCamelCase = None
if isinstance(__snake_case , __snake_case ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__snake_case ),)
_UpperCamelCase = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__snake_case , __snake_case ):
values += (getattr(__snake_case , __snake_case ),)
_UpperCamelCase = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_UpperCamelCase = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_UpperCamelCase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __snake_case , stacklevel=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0:
_UpperCamelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCamelCase = call_frame.filename
_UpperCamelCase = call_frame.lineno
_UpperCamelCase = call_frame.function
_UpperCamelCase , _UpperCamelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__snake_case ) == 0:
return
elif len(__snake_case ) == 1:
return values[0]
return values
| 10 | 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 10 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def simple_accuracy( preds , labels ):
    return (preds == labels).mean()
@dataclass
class ModelArguments :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class DataTrainingArguments :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bit training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(__snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__snake_case )
return results
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 10 | 1 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
def __init__( self : Tuple , _A : str , _A : List[str]=13 , _A : Optional[int]=7 , _A : List[Any]=True , _A : int=True , _A : int=True , _A : Tuple=True , _A : Tuple=99 , _A : Tuple=32 , _A : List[str]=5 , _A : Any=4 , _A : Any=37 , _A : Tuple="gelu" , _A : Optional[int]=0.1 , _A : List[str]=0.1 , _A : int=512 , _A : List[str]=16 , _A : str=2 , _A : Dict=0.02 , _A : str=4 , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_attention_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_choices
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_attention_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = True
_UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = True
UpperCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''roberta-base''' , from_pt=_A )
_UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_A )
| 10 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=5_0265 , _A : Optional[Any]=1024 , _A : Optional[Any]=12 , _A : Any=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=512 , _A : Dict=0.1 , _A : List[str]=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=2 , _A : Any=0.02 , _A : List[str]=0.0 , _A : List[str]=True , _A : str=False , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : int=0 , _A : Any=2 , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = activation_function
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_learned_position_embeddings
_UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
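# for reference, the upstream class this file mirrors is used roughly like
# this (hedged: these are the public transformers names, not this copy):
#   from transformers import TrOCRConfig, TrOCRForCausalLM
#   config = TrOCRConfig(d_model=1024, decoder_layers=12)
#   model = TrOCRForCausalLM(config)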
| 10 | 1 |
def print_max_activities( start , finish ):
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider the rest of the activities
    for j in range(n ):
        # If this activity has a start time greater than or equal to the
        # finish time of the previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
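# the greedy choice above assumes the activities are already sorted by finish
# time; for the sample data below this prints
#   The following activities are selected:
#   0,1,3,4,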
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 10 | import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester :
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
        (
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
            _UpperCamelCase,
        ) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
            # `QAPipelineTests` fails for a few models when the slow tokenizers are used.
            # (The slow tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def UpperCamelCase_ ( self : str , inputs_dict : Any , model_class : List[str] , return_labels : Optional[int]=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict['''start_positions'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['''end_positions'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
| 10 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class lowerCAmelCase_ ( unittest.TestCase, __lowercase ):
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = load_tool('''text-classification''' )
self.tool.setup()
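        # A second copy of the tool is built with remote=True, which (assuming the default
        # load_tool behavior) calls an inference endpoint instead of running the checkpoint locally.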
_UpperCamelCase = load_tool('''text-classification''' , remote=_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(_A , '''positive''' )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.remote_tool('''That\'s quite cool''' , ['''positive''', '''negative'''] )
self.assertEqual(_A , '''positive''' )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(_A , '''positive''' )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.remote_tool(text='''That\'s quite cool''' , labels=['''positive''', '''negative'''] )
self.assertEqual(_A , '''positive''' )
| 10 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
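        # Builds input ids plus an attention mask in which every example keeps a random-length
        # prefix of ones, so each row attends to at least one token.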
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size , seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            projection_dim=self.projection_dim ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            dropout=self.dropout ,
            attention_dropout=self.attention_dropout ,
            max_position_embeddings=self.max_position_embeddings ,
            initializer_range=self.initializer_range ,
            bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "ibert"
def __init__( self : str , _A : Any=3_0522 , _A : List[Any]=768 , _A : Any=12 , _A : Union[str, Any]=12 , _A : Union[str, Any]=3072 , _A : Optional[int]="gelu" , _A : int=0.1 , _A : Tuple=0.1 , _A : int=512 , _A : int=2 , _A : Union[str, Any]=0.02 , _A : Dict=1e-12 , _A : Tuple=1 , _A : str=0 , _A : Dict=2 , _A : Union[str, Any]="absolute" , _A : Optional[int]=False , _A : List[Any]="none" , **_A : Dict , ):
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = position_embedding_type
_UpperCamelCase = quant_mode
_UpperCamelCase = force_dequant
class lowerCAmelCase_ ( __lowercase ):
@property
def UpperCamelCase_ ( self : Tuple ):
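        # Only the batch, sequence and (for multiple choice) choice axes are dynamic in the
        # exported ONNX graph; every other dimension is fixed at export time.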
if self.task == "multiple-choice":
_UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_UpperCamelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 10 | from __future__ import annotations
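# A Sieve of Eratosthenes marks every composite number up to 1_000_000 once at import
# time, so the is_prime lookup below runs in constant time.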
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def is_prime( n ):
    return seive[n]
def contains_an_even_digit( n ):
    return any(digit in '''02468''' for digit in str(n ) )
def find_circular_primes( limit = 1000000 ):
    result = [2]  # result already includes the number 2.
    for num in range(3 , limit + 1 , 2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
def solution():
    return len(find_circular_primes() )
if __name__ == "__main__":
    print(f'{len(find_circular_primes()) = }')
| 10 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{'''input_values''': feature['''input_values''']} for feature in features]
        label_features = [{'''input_ids''': feature['''labels''']} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
        # replace padding with -100 so padded label positions are ignored by the CTC loss
        labels = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
        batch['''labels'''] = labels
        return batch
class lowerCAmelCase_ ( __lowercase ):
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
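        # Under nn.DataParallel the loss arrives as a vector with one entry per GPU, so it
        # is reduced below according to the model's CTC loss reduction ("mean" or "sum").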
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overwrite it.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
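    # The characters to ignore are joined into a single regex character class so they can
    # be stripped from every transcript with one re.sub call.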
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
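    # The vocabulary is the union of all characters seen in the train and test transcripts.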
    vocab_list = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
    vocab_dict = {v: k for k, v in enumerate(vocab_list )}
    vocab_dict['''|'''] = vocab_dict[''' ''']
    del vocab_dict[" "]
    vocab_dict['''[UNK]'''] = len(vocab_dict )
    vocab_dict['''[PAD]'''] = len(vocab_dict )
    with open('''vocab.json''' , '''w''' ) as vocab_file:
        json.dump(vocab_dict , vocab_file )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
        activation_dropout=model_args.activation_dropout ,
        attention_dropout=model_args.attention_dropout ,
        hidden_dropout=model_args.hidden_dropout ,
        feat_proj_dropout=model_args.feat_proj_dropout ,
        mask_time_prob=model_args.mask_time_prob ,
        gradient_checkpointing=training_args.gradient_checkpointing ,
        layerdrop=model_args.layerdrop ,
        ctc_loss_reduction='''mean''' ,
        pad_token_id=processor.tokenizer.pad_token_id ,
        vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
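    # Common Voice clips ship at 48 kHz while Wav2Vec2 checkpoints expect 16 kHz audio,
    # hence the resampler applied to every file below.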
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
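    # Word error rate (WER): edit distance between predicted and reference transcripts,
    # normalized by reference length; the standard metric for speech recognition.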
def compute_metrics(__snake_case ):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits , axis=-1 )
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids )
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids , group_tokens=False )
        wer = wer_metric.compute(predictions=pred_str , references=label_str )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
| 10 | import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = DebertaVaTokenizer(_A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
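        # "T" is absent from the sentencepiece fixture vocabulary, so ids decode back to
        # "<unk>" in its place while the remaining pieces round-trip unchanged.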
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 10 | 1 |
# flake8: noqa
# Lint as: python3
_lowerCAmelCase = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 10 | import sys
from collections import defaultdict
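# Prim's algorithm grows a minimum spanning tree from vertex 0, keeping the remaining
# vertices in a binary min-heap keyed by the cheapest known edge into the tree;
# Heap.node_position tracks where each vertex currently sits inside that heap.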
class Heap:
    def __init__( self ):
        self.node_position = []
    def get_position( self , vertex ):
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[index] = val
            position[index] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 10 | 1 |
| 10 | import logging
import os
from .state import PartialState
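# MultiProcessAdapter filters log records in distributed runs: by default only the main
# process emits a record, and with in_order=True each rank logs in turn instead.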
class MultiProcessAdapter( logging.LoggerAdapter ):
    @staticmethod
    def _should_log( main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self , level , msg , *args , **kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
        main_process_only = kwargs.pop('''main_process_only''' , True )
        in_order = kwargs.pop('''in_order''' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger( name , log_level = None ):
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
| 10 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = 0
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_A , _A )
def UpperCamelCase_ ( self : str ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = Path(_A ) / '''preprocessor_config.json'''
_UpperCamelCase = Path(_A ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) )
_UpperCamelCase = AutoImageProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = Path(_A ) / '''preprocessor_config.json'''
_UpperCamelCase = Path(_A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) )
_UpperCamelCase = AutoImageProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = CLIPConfig()
            # Create a dummy config file with image_processor_type
_UpperCamelCase = Path(_A ) / '''preprocessor_config.json'''
_UpperCamelCase = Path(_A ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_UpperCamelCase = AutoImageProcessor.from_pretrained(_A ).to_dict()
config_dict.pop('''image_processor_type''' )
_UpperCamelCase = CLIPImageProcessor(**_A )
# save in new folder
model_config.save_pretrained(_A )
config.save_pretrained(_A )
_UpperCamelCase = AutoImageProcessor.from_pretrained(_A )
# make sure private variable is not incorrectly saved
_UpperCamelCase = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_A , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = Path(_A ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
_UpperCamelCase = AutoImageProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
with self.assertRaisesRegex(
_A , '''clip-base is not a local folder and is not a valid model identifier''' ):
_UpperCamelCase = AutoImageProcessor.from_pretrained('''clip-base''' )
def UpperCamelCase_ ( self : Dict ):
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_UpperCamelCase = AutoImageProcessor.from_pretrained(_A , revision='''aaaaaa''' )
def UpperCamelCase_ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
_A , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def UpperCamelCase_ ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_A ):
_UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
_UpperCamelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A )
_UpperCamelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A )
_UpperCamelCase = AutoImageProcessor.from_pretrained(_A , trust_remote_code=_A )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def UpperCamelCase_ ( self : List[Any] ):
try:
AutoConfig.register('''custom''' , _A )
AutoImageProcessor.register(_A , _A )
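            # register() mutates global mappings, so the finally block below removes the
            # custom entries again to keep other tests isolated.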
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoImageProcessor.register(_A , _A )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = Path(_A ) / '''preprocessor_config.json'''
_UpperCamelCase = Path(_A ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_A , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_A , '''w''' ) )
_UpperCamelCase = CustomImageProcessor.from_pretrained(_A )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A )
_UpperCamelCase = AutoImageProcessor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase_ ( self : Optional[Any] ):
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = True
try:
AutoConfig.register('''custom''' , _A )
AutoImageProcessor.register(_A , _A )
# If remote code is not set, the default is to use local
_UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_UpperCamelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_UpperCamelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_A )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_A , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 10 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = BertGenerationTokenizer
UpperCAmelCase = False
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''<s>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_A ) , 1002 )
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = BertGenerationTokenizer(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [285, 46, 10, 170, 382] , )
_UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCamelCase = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''Hello World!'''
_UpperCamelCase = [1_8536, 2260, 101]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
_UpperCamelCase = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
_UpperCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
_UpperCamelCase = ''' '''.join(_A )
_UpperCamelCase = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_A )
_UpperCamelCase = BertGenerationConfig()
_UpperCamelCase = BertGenerationEncoder(_A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
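# Hedged usage sketch mirroring the slow test above; the checkpoint name and the
# expected ids come directly from the test, the rest is standard transformers API:
#
# from transformers import BertGenerationTokenizer
# tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# assert tok.encode("Hello World!") == [18536, 2260, 101]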
| 10 | 1 |
from __future__ import annotations
from typing import TypedDict
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = 42
UpperCAmelCase = 42
def _snake_case ( __snake_case ):
if not isinstance(__snake_case , __snake_case ):
raise TypeError('''The parameter s type must be str.''' )
return [s[i:] + s[:i] for i in range(len(__snake_case ) )]
def _snake_case ( __snake_case ):
if not isinstance(__snake_case , __snake_case ):
raise TypeError('''The parameter s type must be str.''' )
if not s:
raise ValueError('''The parameter s must not be empty.''' )
_UpperCamelCase = all_rotations(__snake_case )
rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
_UpperCamelCase = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__snake_case ),
}
return response
def _snake_case ( __snake_case , __snake_case ):
if not isinstance(__snake_case , __snake_case ):
raise TypeError('''The parameter bwt_string type must be str.''' )
if not bwt_string:
raise ValueError('''The parameter bwt_string must not be empty.''' )
try:
_UpperCamelCase = int(__snake_case )
except ValueError:
raise TypeError(
'''The parameter idx_original_string type must be int or'''
''' castable to int.''' )
if idx_original_string < 0:
raise ValueError('''The parameter idx_original_string must not be lower than 0.''' )
if idx_original_string >= len(__snake_case ):
raise ValueError(
'''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' )
_UpperCamelCase = [''''''] * len(__snake_case )
for _ in range(len(__snake_case ) ):
for i in range(len(__snake_case ) ):
_UpperCamelCase = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
_lowerCAmelCase = "Provide a string that I will generate its BWT transform: "
_lowerCAmelCase = input(entry_msg).strip()
_lowerCAmelCase = bwt_transform(s)
print(
f'Burrows Wheeler transform for string \'{s}\' results '
f'in \'{result["bwt_string"]}\''
)
_lowerCAmelCase = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
f'we get original string \'{original_string}\''
)
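# De-obfuscated sketch of the same Burrows-Wheeler round trip, with plain names so the
# algorithm is easier to follow (an illustration of the functions above, not new API):

def _bwt_sketch(s: str) -> tuple[str, int]:
    # sort all rotations, keep each rotation's last character, remember where s landed
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(rotation[-1] for rotation in rotations), rotations.index(s)

def _reverse_bwt_sketch(bwt_string: str, idx: int) -> str:
    # repeatedly prepend the BWT column and re-sort; after len(bwt_string) passes the
    # rows are exactly the sorted rotations of the original string
    rows = [""] * len(bwt_string)
    for _ in bwt_string:
        rows = sorted(char + row for char, row in zip(bwt_string, rows))
    return rows[idx]

assert _reverse_bwt_sketch(*_bwt_sketch("banana")) == "banana"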
| 10 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase = pipe('''anime turtle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = None
UpperCAmelCase = None
def _snake_case ( ):
_UpperCamelCase = Node(1 )
_UpperCamelCase = Node(2 )
_UpperCamelCase = Node(3 )
_UpperCamelCase = Node(4 )
_UpperCamelCase = Node(5 )
return tree
def _snake_case ( __snake_case ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def _snake_case ( __snake_case ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def _snake_case ( __snake_case ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def _snake_case ( __snake_case ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def _snake_case ( __snake_case ):
_UpperCamelCase = []
if root is None:
return output
_UpperCamelCase = deque([root] )
while process_queue:
_UpperCamelCase = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = []
def populate_output(__snake_case , __snake_case ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__snake_case , __snake_case )
return output
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = []
def populate_output(__snake_case , __snake_case ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__snake_case , __snake_case )
return output
def _snake_case ( __snake_case ):
if root is None:
return []
_UpperCamelCase = []
_UpperCamelCase = 0
_UpperCamelCase = height(__snake_case )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__snake_case , __snake_case ) )
_UpperCamelCase = 1
else:
output.append(get_nodes_from_right_to_left(__snake_case , __snake_case ) )
_UpperCamelCase = 0
return output
def _snake_case ( ): # Main function for testing.
_UpperCamelCase = make_tree()
print(f"""In-order Traversal: {inorder(__snake_case )}""" )
print(f"""Pre-order Traversal: {preorder(__snake_case )}""" )
print(f"""Post-order Traversal: {postorder(__snake_case )}""" , '''\n''' )
print(f"""Height of Tree: {height(__snake_case )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(__snake_case ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(__snake_case ) + 1 ):
print(f"""Level {level}:""" , get_nodes_from_left_to_right(__snake_case , level=__snake_case ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(__snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
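# Quick sanity check of the traversals above, assuming make_tree wires the sample tree
# as 1 -> (2, 3) and 2 -> (4, 5):
#
# inorder(root)     -> [4, 2, 5, 1, 3]
# level_order(root) -> [1, 2, 3, 4, 5]
# zigzag(root)      -> [[1], [3, 2], [4, 5]]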
| 10 | from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case ( __snake_case , __snake_case ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__snake_case , __snake_case ) ) )
def _snake_case ( __snake_case , __snake_case ):
if dataset.ndim != value_array.ndim:
_UpperCamelCase = (
'''Wrong input data\'s dimensions... '''
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__snake_case )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCamelCase = (
'''Wrong input data\'s shape... '''
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__snake_case )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
_UpperCamelCase = (
'''Input data have different datatype... '''
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__snake_case )
_UpperCamelCase = []
for value in value_array:
_UpperCamelCase = euclidean(__snake_case , dataset[0] )
_UpperCamelCase = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCamelCase = euclidean(__snake_case , __snake_case )
if dist > temp_dist:
_UpperCamelCase = temp_dist
_UpperCamelCase = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _snake_case ( __snake_case , __snake_case ):
return np.dot(__snake_case , __snake_case ) / (norm(__snake_case ) * norm(__snake_case ))
if __name__ == "__main__":
import doctest
doctest.testmod()
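# Tiny worked example of the cosine-similarity helper above (no assumptions beyond the
# formula dot(a, b) / (|a| * |b|)): the angle between [1, 0] and [1, 1] is 45 degrees.

_a = np.array([1.0, 0.0])
_b = np.array([1.0, 1.0])
assert abs(np.dot(_a, _b) / (norm(_a) * norm(_b)) - 0.70710678) < 1e-6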
| 10 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase = False
class lowerCAmelCase_ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_A )
_UpperCamelCase = VersatileDiffusionPipeline.from_pretrained(_A , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = generator.manual_seed(0 )
_UpperCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = '''cyberpunk 2077'''
_UpperCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe.dual_guided(
prompt=_A , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
_UpperCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_UpperCamelCase = '''A painting of a squirrel eating a burger '''
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe.text_to_image(
prompt=_A , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
_UpperCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_UpperCamelCase = pipe.image_variation(_A , generator=_A , output_type='''numpy''' ).images
_UpperCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_UpperCamelCase = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 10 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = ShapEPipeline
UpperCAmelCase = ["prompt"]
UpperCAmelCase = ["prompt"]
UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 10 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCAmelCase = HfApi()
_lowerCAmelCase = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 10 | 1 |
from functools import lru_cache
def _snake_case ( __snake_case ):
_UpperCamelCase = 2
_UpperCamelCase = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__snake_case )
if n > 1:
factors.add(__snake_case )
return factors
@lru_cache
def _snake_case ( __snake_case ):
return len(unique_prime_factors(__snake_case ) )
def _snake_case ( __snake_case ):
return len(set(__snake_case ) ) in (0, 1)
def _snake_case ( __snake_case ):
_UpperCamelCase = 2
while True:
# Increment each value of a generated range
_UpperCamelCase = [base + i for i in range(__snake_case )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
_UpperCamelCase = [upf_len(__snake_case ) for x in group]
checker.append(__snake_case )
# If all numbers in the list are equal, return the group variable.
if equality(__snake_case ):
return group
# Increment our base variable by 1
base += 1
def _snake_case ( __snake_case = 4 ):
_UpperCamelCase = run(__snake_case )
return results[0] if len(__snake_case ) else None
if __name__ == "__main__":
print(solution())
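# Worked example for the search above: Project Euler 47 cites 644, 645, 646 as the first
# run of three consecutive integers with three distinct prime factors each:
# 644 = 2**2 * 7 * 23 -> {2, 7, 23}
# 645 = 3 * 5 * 43    -> {3, 5, 43}
# 646 = 2 * 17 * 19   -> {2, 17, 19}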
| 10 | from typing import List
from .keymap import KEYMAP, get_character
def _snake_case ( __snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += [key]
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
def _snake_case ( *__snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += keys
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
class lowerCAmelCase_ ( __lowercase ):
def __new__( cls : Optional[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] ):
_UpperCamelCase = super().__new__(cls , _A , _A , _A )
if not hasattr(_A , '''key_handler''' ):
setattr(_A , '''key_handler''' , {} )
setattr(_A , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCamelCase = getattr(_A , '''handle_key''' , [] )
for key in handled_keys:
_UpperCamelCase = value
return new_cls
@staticmethod
def UpperCamelCase_ ( cls : str ):
_UpperCamelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCamelCase = ord(_A )
_UpperCamelCase = cls.key_handler.get(_A )
if handler:
_UpperCamelCase = char
return handler(cls )
else:
return None
def _snake_case ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
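# Hedged sketch of the decorator-plus-metaclass pattern above, with plain names
# (illustration only, not the module's actual public API): each decorator stashes key
# codes on the method, and the metaclass later collects them into a dispatch table.

def _register_keys(*keys):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + list(keys)
        return func
    return decorator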
| 10 | 1 |
def _snake_case ( __snake_case ):
return str(__snake_case ) == str(__snake_case )[::-1]
def _snake_case ( __snake_case ):
return int(__snake_case ) + int(str(__snake_case )[::-1] )
def _snake_case ( __snake_case = 10000 ):
_UpperCamelCase = []
for num in range(1 , __snake_case ):
_UpperCamelCase = 0
_UpperCamelCase = num
while iterations < 50:
_UpperCamelCase = sum_reverse(__snake_case )
iterations += 1
if is_palindrome(__snake_case ):
break
else:
lychrel_nums.append(__snake_case )
return len(__snake_case )
if __name__ == "__main__":
print(f'{solution() = }')
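# Worked example for the loop above: 47 settles after one reverse-and-add step
# (47 + 74 = 121, a palindrome), so it is not counted; a number like 196 never reaches
# a palindrome within the 50-iteration budget and is counted as a Lychrel candidate.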
| 10 | import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCamelCase_ ( self : int , _A : str , _A : Union[str, Any] , _A : Any ):
_UpperCamelCase = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_UpperCamelCase = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_UpperCamelCase = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any] ):
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_text=_A , return_tensors=_A )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_UpperCamelCase = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_UpperCamelCase = logging.get_logger('''transformers.generation.utils''' )
_UpperCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
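# Minimal, hedged usage sketch of the pipeline behaviour tested above (standard
# transformers API; the tiny checkpoint id is taken from these tests):
#
# from transformers import pipeline
# generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
# out = generator("Hello I believe in", max_new_tokens=5, do_sample=False)
# print(out[0]["generated_text"])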
| 10 | 1 |
_lowerCAmelCase = "Input must be a string of 8 numbers plus letter"
_lowerCAmelCase = "TRWAGMYFPDXBNJZSQVHLCKE"
def _snake_case ( __snake_case ):
if not isinstance(__snake_case , __snake_case ):
_UpperCamelCase = f"""Expected string as input, found {type(__snake_case ).__name__}"""
raise TypeError(__snake_case )
_UpperCamelCase = spanish_id.replace('''-''' , '''''' ).upper()
if len(__snake_case ) != 9:
raise ValueError(__snake_case )
try:
_UpperCamelCase = int(spanish_id_clean[0:8] )
_UpperCamelCase = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__snake_case ) from ex
if letter.isdigit():
raise ValueError(__snake_case )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
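# Worked example for the checksum above: the control letter is
# LOOKUP_LETTERS[number % 23], so for "12345678Z" we get 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", meaning the id validates.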
| 10 | def _snake_case ( __snake_case = 100 ):
_UpperCamelCase = (n * (n + 1) // 2) ** 2
_UpperCamelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
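# Worked example for n = 10: (1 + ... + 10)**2 = 55**2 = 3025 and
# 1**2 + ... + 10**2 = 385, so the difference is 3025 - 385 = 2640.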
| 10 | 1 |
import os
def _snake_case ( __snake_case = "matrix.txt" ):
with open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) as in_file:
_UpperCamelCase = in_file.read()
_UpperCamelCase = [[int(__snake_case ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
_UpperCamelCase = [[0 for cell in row] for row in grid]
_UpperCamelCase = len(grid[0] )
_UpperCamelCase = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
_UpperCamelCase = grid[0][0]
for i in range(1 , __snake_case ):
_UpperCamelCase = grid[0][i] + dp[0][i - 1]
for i in range(1 , __snake_case ):
_UpperCamelCase = grid[i][0] + dp[i - 1][0]
for i in range(1 , __snake_case ):
for j in range(1 , __snake_case ):
_UpperCamelCase = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(f'{solution() = }')
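# Tiny worked example of the DP above: for the grid [[1, 2], [3, 4]] the cheapest
# right/down path is 1 -> 2 -> 4 with cost 7, and dp ends as [[1, 3], [4, 7]].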
| 10 | import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
def constraint_to_multiple_of(__snake_case , __snake_case , __snake_case=0 , __snake_case=None ):
_UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
_UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
_UpperCamelCase = (output_size, output_size) if isinstance(__snake_case , __snake_case ) else output_size
_UpperCamelCase , _UpperCamelCase = get_image_size(__snake_case )
_UpperCamelCase , _UpperCamelCase = output_size
# determine new height and width
_UpperCamelCase = output_height / input_height
_UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_UpperCamelCase = scale_width
else:
# fit height
_UpperCamelCase = scale_height
_UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=__snake_case )
_UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=__snake_case )
return (new_height, new_width)
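# Worked example of the snapping helper above: with multiple = 32, a proposed side of
# 100 px gives round(100 / 32) * 32 == 96, so it is snapped to 96 (and ceil would push
# it back up if that fell below min_val).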
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
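# Usage sketch (hypothetical call; class and method names are rewritten in this
# file): an instance built with size={"height": 384, "width": 384},
# keep_aspect_ratio=True and ensure_multiple_of=32 maps a list of images to a
# BatchFeature whose "pixel_values" entry has shape (N, 3, H, W), with H and W
# constrained to multiples of 32 by the helper above.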
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
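# Shape sketch for the post-processing above: `outputs.logits` is
# (batch, num_labels, h, w); with target_sizes = [(H, W), ...] each map is
# bilinearly resized to (H, W) and argmax'ed over the label axis, returning one
# (H, W) tensor of class ids per image; without target_sizes a single argmax
# over dim=1 is applied to the whole batch.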
| 10 | 1 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_lowerCAmelCase = TypeVar("_T")
class lowerCAmelCase_ ( Generic[_T] ):
def __init__( self : str , _A : Iterable[_T] | None = None ):
_UpperCamelCase = list(iterable or [] )
_UpperCamelCase = []
def __len__( self : Optional[Any] ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Optional[Any] ):
return F"""Queue({tuple(self._stacka[::-1] + self._stacka )})"""
def UpperCamelCase_ ( self : int , _A : _T ):
self._stacka.append(_A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self._stacka.pop
_UpperCamelCase = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('''Queue is empty''' )
return self._stacka.pop()
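# Behavior sketch for the two-stack queue above: the enqueue method pushes onto
# the first internal stack, and the dequeue method pops from the second,
# refilling it (reversed) from the first only when it runs empty; elements
# enqueued as 10, 20, 30 therefore come out as 10, 20, 30 (FIFO) at amortized
# O(1) cost per operation.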
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Optional[Any] , _A : List[str] , _A : Optional[int]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 10 | 1 |
import sys
from collections import defaultdict
class lowerCAmelCase_ :
def __init__( self : Optional[int] ):
_UpperCamelCase = []
def UpperCamelCase_ ( self : Any , _A : str ):
return self.node_position[vertex]
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ):
_UpperCamelCase = pos
def UpperCamelCase_ ( self : Any , _A : List[str] , _A : int , _A : Optional[Any] , _A : Union[str, Any] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCamelCase = 2 * start + 1
else:
_UpperCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child]
_UpperCamelCase , _UpperCamelCase = (
heap[start],
positions[start],
)
_UpperCamelCase , _UpperCamelCase = temp, tempa
_UpperCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _A )
self.top_to_bottom(_A , _A , _A , _A )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Optional[int] ):
_UpperCamelCase = position[index]
while index != 0:
_UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_UpperCamelCase = heap[parent]
_UpperCamelCase = position[parent]
self.set_position(position[parent] , _A )
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , _A )
break
_UpperCamelCase = parent
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , 0 )
def UpperCamelCase_ ( self : int , _A : Tuple , _A : int ):
_UpperCamelCase = len(_A ) // 2 - 1
for i in range(_A , -1 , -1 ):
self.top_to_bottom(_A , _A , len(_A ) , _A )
def UpperCamelCase_ ( self : Any , _A : int , _A : List[str] ):
_UpperCamelCase = positions[0]
_UpperCamelCase = sys.maxsize
self.top_to_bottom(_A , 0 , len(_A ) , _A )
return temp
def _snake_case ( __snake_case ):
_UpperCamelCase = Heap()
_UpperCamelCase = [0] * len(__snake_case )
_UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCamelCase = []
for vertex in range(len(__snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(__snake_case )
heap.node_position.append(__snake_case )
_UpperCamelCase = []
_UpperCamelCase = 1
_UpperCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCamelCase = 0
_UpperCamelCase = distance
heap.heapify(__snake_case , __snake_case )
for _ in range(1 , len(__snake_case ) ):
_UpperCamelCase = heap.delete_minimum(__snake_case , __snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__snake_case )]
):
_UpperCamelCase = distance
heap.bottom_to_top(
__snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
_UpperCamelCase = vertex
return tree_edges
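# Hand-worked example (assumed 3-vertex graph, traced against the algorithm):
# adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
# Starting from vertex 0, the function returns the minimum-spanning-tree edges
# [(0, 1), (1, 2)], for a total weight of 2.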
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowerCAmelCase = int(input("Enter number of edges: ").strip())
_lowerCAmelCase = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 10 | import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
# split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
_UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
_UpperCamelCase = self.processor.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.processor.pad(
labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
_UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_UpperCamelCase = labels
return batch
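# Collation sketch: for features such as
# [{"input_values": [...], "labels": [5, 2]}, {"input_values": [...], "labels": [7]}],
# the two streams are padded separately, and every label position whose
# attention_mask is 0 is overwritten with -100 so the CTC loss skips the padding.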
class lowerCAmelCase_ ( __lowercase ):
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome this.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
| 10 | 1 |
def _snake_case ( __snake_case , __snake_case ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2*f2 + ... + 2*f(n-1) + fn)
_UpperCamelCase = (boundary[1] - boundary[0]) / steps
_UpperCamelCase = boundary[0]
_UpperCamelCase = boundary[1]
_UpperCamelCase = make_points(__snake_case , __snake_case , __snake_case )
_UpperCamelCase = 0.0
y += (h / 2.0) * f(__snake_case )
for i in x_i:
# print(i)
y += h * f(__snake_case )
y += (h / 2.0) * f(__snake_case )
return y
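# Hand-checked example for the rule above: f(x) = x**2 on [0.0, 1.0] with 10
# steps gives h = 0.1 and
# y = 0.05 * (f(0) + f(1)) + 0.1 * (f(0.1) + ... + f(0.9)) ≈ 0.335,
# close to the exact integral 1/3.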
def _snake_case ( __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = a + h
while x < (b - h):
yield x
_UpperCamelCase = x + h
def _snake_case ( __snake_case ): # enter your function here
_UpperCamelCase = (x - 0) * (x - 0)
return y
def _snake_case ( ):
_UpperCamelCase = 0.0 # Lower bound of integration
_UpperCamelCase = 1.0 # Upper bound of integration
_UpperCamelCase = 10.0 # define number of steps or resolution
_UpperCamelCase = [a, b] # define boundary of integration
_UpperCamelCase = method_a(__snake_case , __snake_case )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
| 10 | import math
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : int=0 ): # a graph with Node 0,1,...,N-1
_UpperCamelCase = n
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # adjacency matrix for weight
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase_ ( self : Dict , _A : str , _A : List[str] , _A : Optional[Any] ):
_UpperCamelCase = w
def UpperCamelCase_ ( self : Optional[int] ):
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] , _A : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
print(graph.show_min(1, 4))
print(graph.show_min(0, 3))
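# Hand-checked expectations for the graph above: the shortest 1 -> 4 distance is
# 11 (via 1 -> 3 -> 4) and the shortest 0 -> 3 distance is 16 (via 0 -> 2 -> 3).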
| 10 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _snake_case ( __snake_case ):
for param in module.parameters():
_UpperCamelCase = False
def _snake_case ( ):
_UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_UpperCamelCase = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _snake_case ( __snake_case ):
_UpperCamelCase = plt.imshow(__snake_case )
fig.axes.get_xaxis().set_visible(__snake_case )
fig.axes.get_yaxis().set_visible(__snake_case )
plt.show()
def _snake_case ( ):
_UpperCamelCase = datetime.now()
_UpperCamelCase = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 10 | import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(
default=[], metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
}, )
UpperCAmelCase = list_field(
default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase = list_field(
default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Benchmark training of model"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Trace memory line by line"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to print environment information"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
}, )
UpperCAmelCase = field(
default=F"""inference_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv."}, )
UpperCAmelCase = field(
default=F"""inference_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv."}, )
UpperCAmelCase = field(
default=F"""train_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
UpperCAmelCase = field(
default=F"""train_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
UpperCAmelCase = field(
default=F"""env_info_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving environment information."}, )
UpperCAmelCase = field(
default=F"""log_{round(time() )}.csv""", metadata={"help": "Log filename used if print statements are saved in log."}, )
UpperCAmelCase = field(default=3, metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
}, )
def UpperCamelCase_ ( self : Union[str, Any] ):
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
'''to benchmark Transformer models.''' , _A , )
def UpperCamelCase_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCamelCase_ ( self : Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 10 | 1 |
from __future__ import annotations
def _snake_case ( __snake_case ):
if len(__snake_case ) == 0:
return array
_UpperCamelCase , _UpperCamelCase = min(__snake_case ), max(__snake_case )
# Compute the variables
_UpperCamelCase = _max - _min + 1
_UpperCamelCase , _UpperCamelCase = [0] * holes_range, [0] * holes_range
# Do the counting: record each value and how often it occurs.
for i in array:
_UpperCamelCase = i - _min
_UpperCamelCase = i
holes_repeat[index] += 1
# Rebuild the array by writing the counted numbers back in order.
_UpperCamelCase = 0
for i in range(__snake_case ):
while holes_repeat[i] > 0:
_UpperCamelCase = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
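# Hand-worked example: pigeon_sort([8, 3, 2, 7, 4, 6, 8]) counts each value into
# a hole over the range 2..8, rewrites the array in place, and returns
# [2, 3, 4, 6, 7, 8, 8].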
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = input("Enter numbers separated by comma:\n")
_lowerCAmelCase = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 10 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *__snake_case , __snake_case = None , __snake_case=True , __snake_case=2 ):
from .. import __version__
_UpperCamelCase = take_from
_UpperCamelCase = ()
if not isinstance(args[0] , __snake_case ):
_UpperCamelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__snake_case ).base_version ) >= version.parse(__snake_case ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
_UpperCamelCase = None
if isinstance(__snake_case , __snake_case ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__snake_case ),)
_UpperCamelCase = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__snake_case , __snake_case ):
values += (getattr(__snake_case , __snake_case ),)
_UpperCamelCase = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_UpperCamelCase = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_UpperCamelCase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __snake_case , stacklevel=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0:
_UpperCamelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCamelCase = call_frame.filename
_UpperCamelCase = call_frame.lineno
_UpperCamelCase = call_frame.function
_UpperCamelCase , _UpperCamelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__snake_case ) == 0:
return
elif len(__snake_case ) == 1:
return values[0]
return values
| 10 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCAmelCase_ ( __lowercase, __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = StableUnCLIPPipeline
UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
UpperCAmelCase = False
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = 32
_UpperCamelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase = pipe('''anime turtle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 | import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case , __snake_case ):
return (preds == labels).mean()
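# Illustrative values: with preds = np.array([1, 0, 1]) and
# labels = np.array([1, 1, 1]), the accuracy above is (preds == labels).mean(),
# i.e. 2/3.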
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome this.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed
set_seed(training_args.seed )
try:
_UpperCamelCase = processors[data_args.task_name]()
_UpperCamelCase = processor.get_labels()
_UpperCamelCase = len(__snake_case )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__snake_case , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
# Get datasets
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
_UpperCamelCase = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__snake_case , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__snake_case ) -> Dict:
_UpperCamelCase = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__snake_case , p.label_ids )}
# Data collator
_UpperCamelCase = DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
_UpperCamelCase = Trainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __snake_case , __snake_case )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__snake_case )
return results
def _snake_case ( __snake_case ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 10 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case , __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case , __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = []
_UpperCamelCase = fairseq_model.state_dict()
_UpperCamelCase = hf_model.feature_extractor
_UpperCamelCase = hf_model.adapter
for name, value in fairseq_dict.items():
_UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
_UpperCamelCase = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__snake_case , __snake_case , __snake_case , __snake_case )
_UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''' , __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
else:
_UpperCamelCase = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_UpperCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__snake_case )
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = full_name.split('''adaptor.''' )[-1]
_UpperCamelCase = name.split('''.''' )
if items[1].isdigit():
_UpperCamelCase = int(items[1] )
else:
_UpperCamelCase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
_UpperCamelCase = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
_UpperCamelCase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
_UpperCamelCase = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
_UpperCamelCase = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(__snake_case , __snake_case ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
_UpperCamelCase = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
_UpperCamelCase = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(__snake_case )
def _snake_case ( __snake_case ):
_UpperCamelCase , _UpperCamelCase = emb.weight.shape
_UpperCamelCase = nn.Linear(__snake_case , __snake_case , bias=__snake_case )
_UpperCamelCase = emb.weight.data
return lin_layer
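# The helper above is the standard weight-tying trick: it returns a bias-free nn.Linear whose
# weight tensor is shared with the embedding matrix, so the LM head stays in sync with the
# input embeddings.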
@torch.no_grad()
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ):
_UpperCamelCase = WavaVecaConfig.from_pretrained(
__snake_case , add_adapter=__snake_case , adapter_stride=__snake_case , adapter_kernel_size=__snake_case , use_auth_token=__snake_case , output_hidden_size=__snake_case , )
_UpperCamelCase = MBartConfig.from_pretrained(__snake_case )
# load model
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
_UpperCamelCase = model[0].eval()
# load feature extractor
_UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(__snake_case , use_auth_token=__snake_case )
# set weights for wav2vec2 encoder
_UpperCamelCase = WavaVecaModel(__snake_case )
recursively_load_weights_wavaveca(model.encoder , __snake_case )
# load decoder weights
_UpperCamelCase = MBartForCausalLM(__snake_case )
_UpperCamelCase , _UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__snake_case )
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
_UpperCamelCase = SpeechEncoderDecoderModel(encoder=__snake_case , decoder=__snake_case )
_UpperCamelCase = False
_UpperCamelCase = MBartaaTokenizer(__snake_case )
tokenizer.save_pretrained(__snake_case )
_UpperCamelCase = hf_wavavec.config.to_dict()
_UpperCamelCase = tokenizer.pad_token_id
_UpperCamelCase = tokenizer.bos_token_id
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = '''mbart50'''
_UpperCamelCase = '''wav2vec2'''
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = 250004
_UpperCamelCase = tokenizer.eos_token_id
_UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(__snake_case )
hf_wavavec.save_pretrained(__snake_case )
feature_extractor.save_pretrained(__snake_case )
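# Example invocation (illustrative placeholder paths; the script name is whatever this file is saved as):
#   python convert_script.py --checkpoint_path /path/to/fairseq_checkpoint.pt --dict_path /path/to/dict \
#       --config_yaml_path /path/to/config.yaml --pytorch_dump_folder_path ./hf_speech_encoder_decoder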
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
_lowerCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 10 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "trocr"
UpperCAmelCase = ["past_key_values"]
UpperCAmelCase = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : List[str] , _A : Optional[Any]=5_0265 , _A : Optional[Any]=1024 , _A : Optional[Any]=12 , _A : Any=16 , _A : Any=4096 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=512 , _A : Dict=0.1 , _A : List[str]=0.0 , _A : Optional[Any]=0.0 , _A : Union[str, Any]=2 , _A : Any=0.02 , _A : List[str]=0.0 , _A : List[str]=True , _A : str=False , _A : List[str]=True , _A : Optional[Any]=True , _A : Optional[int]=1 , _A : int=0 , _A : Any=2 , **_A : Optional[int] , ):
_UpperCamelCase = vocab_size
_UpperCamelCase = d_model
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = activation_function
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = init_std
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = use_cache
_UpperCamelCase = scale_embedding
_UpperCamelCase = use_learned_position_embeddings
_UpperCamelCase = layernorm_embedding
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , decoder_start_token_id=_A , **_A , )
| 10 | 1 |
import importlib
import os
import sys
# This is required to make the module imports work (when the Python process is running from the root of the repo)
sys.path.append(".")
def _snake_case ( __snake_case ):
_UpperCamelCase = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
f"""{test_file} instead.""" )
_UpperCamelCase = components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
_UpperCamelCase = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
_UpperCamelCase = '''.'''.join(__snake_case )
return test_module_path
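# Example: "tests/models/bert/test_modeling_bert.py" -> "tests.models.bert.test_modeling_bert"
# (an illustrative path; any file matching tests/models/*/test_modeling_*.py works the same way).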
def _snake_case ( __snake_case ):
_UpperCamelCase = get_module_path(__snake_case )
_UpperCamelCase = importlib.import_module(__snake_case )
return test_module
def _snake_case ( __snake_case ):
_UpperCamelCase = []
_UpperCamelCase = get_test_module(__snake_case )
for attr in dir(__snake_case ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(__snake_case , __snake_case ) )
    # sort by class name
    return sorted(tester_classes , key=lambda x : x.__name__ )
def _snake_case ( __snake_case ):
_UpperCamelCase = []
_UpperCamelCase = get_test_module(__snake_case )
for attr in dir(__snake_case ):
_UpperCamelCase = getattr(__snake_case , __snake_case )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test modules. Let's exclude them by
        # checking that `all_model_classes` is not empty (which also excludes other special classes).
_UpperCamelCase = getattr(__snake_case , '''all_model_classes''' , [] )
if len(__snake_case ) > 0:
test_classes.append(__snake_case )
    # sort by class name
    return sorted(test_classes , key=lambda x : x.__name__ )
def _snake_case ( __snake_case ):
_UpperCamelCase = get_test_classes(__snake_case )
_UpperCamelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
    # sort by class name
    return sorted(model_classes , key=lambda x : x.__name__ )
def _snake_case ( __snake_case ):
_UpperCamelCase = test_class()
if hasattr(__snake_case , '''setUp''' ):
test.setUp()
_UpperCamelCase = None
if hasattr(__snake_case , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase = test.model_tester.__class__
return model_tester
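# The tester class is recovered by instantiating the test case, running setUp() so that
# `model_tester` is populated, and returning that attribute's class (or None when it is unset).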
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = get_test_classes(__snake_case )
_UpperCamelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(__snake_case )
    # sort by class name
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = get_test_classes_for_model(__snake_case , __snake_case )
_UpperCamelCase = []
for test_class in test_classes:
_UpperCamelCase = get_model_tester_from_test_class(__snake_case )
if tester_class is not None:
tester_classes.append(__snake_case )
    # sort by class name
    return sorted(tester_classes , key=lambda x : x.__name__ )
def _snake_case ( __snake_case ):
_UpperCamelCase = get_test_classes(__snake_case )
_UpperCamelCase = {test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes}
return test_tester_mapping
def _snake_case ( __snake_case ):
_UpperCamelCase = get_model_classes(__snake_case )
_UpperCamelCase = {
model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_test_mapping
def _snake_case ( __snake_case ):
_UpperCamelCase = get_model_classes(__snake_case )
_UpperCamelCase = {
model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def _snake_case ( __snake_case ):
if isinstance(__snake_case , __snake_case ):
return o
elif isinstance(__snake_case , __snake_case ):
return o.__name__
elif isinstance(__snake_case , (list, tuple) ):
return [to_json(__snake_case ) for x in o]
elif isinstance(__snake_case , __snake_case ):
return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()}
else:
return o
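# Intended behavior of to_json (illustrative, with a hypothetical class): strings pass through,
# classes map to their __name__ (e.g. BertModel -> "BertModel"), and lists/tuples/dicts are
# converted recursively so the whole mapping can be dumped with json.dumps.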
| 10 | import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Any=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[str]=True , _A : List[Any]=True , _A : Optional[int]=False , _A : Any=False , _A : int=False , _A : Optional[Any]=2 , _A : Any=99 , _A : str=0 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Tuple=4 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : Union[str, Any]=12 , _A : List[str]=2 , _A : int=0.02 , _A : Optional[Any]=3 , _A : Any=4 , _A : Optional[int]="last" , _A : Any=None , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_lengths
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = gelu_activation
_UpperCamelCase = sinusoidal_embeddings
_UpperCamelCase = causal
_UpperCamelCase = asm
_UpperCamelCase = n_langs
_UpperCamelCase = vocab_size
_UpperCamelCase = n_special
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = summary_type
_UpperCamelCase = use_proj
_UpperCamelCase = scope
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_input_lengths:
_UpperCamelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase_ ( self : str ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCamelCase_ ( self : str , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple , _A : List[str] , _A : List[Any] , _A : Any , _A : str , _A : Optional[int] , ):
_UpperCamelCase = FlaubertModel(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , lengths=_A , langs=_A )
_UpperCamelCase = model(_A , langs=_A )
_UpperCamelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[Any] , _A : str , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , _A : int , _A : str , _A : List[Any] , _A : Any , ):
_UpperCamelCase = FlaubertWithLMHeadModel(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Tuple , _A : List[str] , _A : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : str , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Dict , ):
_UpperCamelCase = FlaubertForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Tuple , _A : str , _A : Tuple , _A : Tuple , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : Dict , _A : List[Any] , ):
_UpperCamelCase = FlaubertForQuestionAnswering(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
_UpperCamelCase = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
_UpperCamelCase = model(_A , start_positions=_A , end_positions=_A )
((_UpperCamelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : Tuple , _A : str , _A : int , _A : int , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[str] , ):
_UpperCamelCase = FlaubertForSequenceClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A )
_UpperCamelCase = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase_ ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : str , _A : Union[str, Any] , _A : List[Any] , _A : int , _A : List[Any] , _A : str , _A : List[str] , ):
_UpperCamelCase = self.num_labels
_UpperCamelCase = FlaubertForTokenClassification(_A )
model.to(_A )
model.eval()
_UpperCamelCase = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple , _A : Dict , _A : str , _A : Optional[Any] , _A : List[str] , _A : Any , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , ):
_UpperCamelCase = self.num_choices
_UpperCamelCase = FlaubertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCamelCase = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __lowercase, __lowercase, unittest.TestCase ):
UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self : Union[str, Any] , _A : Dict , _A : Dict , _A : Tuple , _A : int , _A : Any ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework.)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slow tokenizers
return True
return False
def UpperCamelCase_ ( self : str , _A : Any , _A : List[str] , _A : Optional[int]=False ):
_UpperCamelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
_UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
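    # (The two zero tensors above stand in for the start/end position labels that the QA head
    # expects during the common tests.)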
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = FlaubertModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , emb_dim=37 )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_A )
@slow
def UpperCamelCase_ ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = FlaubertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCamelCase = True
_UpperCamelCase = model_class(config=_A )
_UpperCamelCase = self._prepare_for_class(_A , _A )
_UpperCamelCase = torch.jit.trace(
_A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) )
_UpperCamelCase = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A )
loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
_UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
_UpperCamelCase = model(_A )[0]
_UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
_UpperCamelCase = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _A , atol=1e-4 ) )
| 10 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = (UnCLIPScheduler,)
def UpperCamelCase_ ( self : Any , **_A : int ):
_UpperCamelCase = {
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_A )
return config
def UpperCamelCase_ ( self : Tuple ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCamelCase_ ( self : int ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_A )
def UpperCamelCase_ ( self : Tuple ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def UpperCamelCase_ ( self : Optional[Any] ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_A )
def UpperCamelCase_ ( self : str ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_A , prev_timestep=_A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(variance_type='''fixed_small_log''' )
_UpperCamelCase = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1e-5
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config(variance_type='''learned_range''' )
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_A ) - -10.171_2790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_A ) - -5.799_8052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_A ) - -0.001_0011 < 1e-5
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
_UpperCamelCase = scheduler.timesteps
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
_UpperCamelCase = torch.manual_seed(0 )
for i, t in enumerate(_A ):
# 1. predict noise residual
_UpperCamelCase = model(_A , _A )
# 2. predict previous mean of sample x_t-1
_UpperCamelCase = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1e-2
assert abs(result_mean.item() - 0.328_4743 ) < 1e-3
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.scheduler_classes[0]
_UpperCamelCase = self.get_scheduler_config()
_UpperCamelCase = scheduler_class(**_A )
scheduler.set_timesteps(25 )
_UpperCamelCase = scheduler.timesteps
_UpperCamelCase = self.dummy_model()
_UpperCamelCase = self.dummy_sample_deter
_UpperCamelCase = torch.manual_seed(0 )
for i, t in enumerate(_A ):
# 1. predict noise residual
_UpperCamelCase = model(_A , _A )
if i + 1 == timesteps.shape[0]:
_UpperCamelCase = None
else:
_UpperCamelCase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_UpperCamelCase = scheduler.step(
_A , _A , _A , prev_timestep=_A , generator=_A ).prev_sample
_UpperCamelCase = pred_prev_sample
_UpperCamelCase = torch.sum(torch.abs(_A ) )
_UpperCamelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1e-2
assert abs(result_mean.item() - 0.336_2038 ) < 1e-3
def UpperCamelCase_ ( self : Any ):
pass
def UpperCamelCase_ ( self : Dict ):
pass
| 10 | from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 10 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_lowerCAmelCase = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_lowerCAmelCase = {
"ctrl": 256,
}
_lowerCAmelCase = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def _snake_case ( __snake_case ):
_UpperCamelCase = set()
_UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCamelCase = char
_UpperCamelCase = set(__snake_case )
return pairs
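# Example: get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")} -- the adjacent symbol pairs
# that the BPE merge ranks below are looked up against.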
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = CONTROL_CODES
def __init__( self : Optional[int] , _A : Dict , _A : str , _A : Union[str, Any]="<unk>" , **_A : List[str] ):
super().__init__(unk_token=_A , **_A )
with open(_A , encoding='''utf-8''' ) as vocab_handle:
_UpperCamelCase = json.load(_A )
_UpperCamelCase = {v: k for k, v in self.encoder.items()}
with open(_A , encoding='''utf-8''' ) as merges_handle:
_UpperCamelCase = merges_handle.read().split('''\n''' )[1:-1]
_UpperCamelCase = [tuple(merge.split() ) for merge in merges]
_UpperCamelCase = dict(zip(_A , range(len(_A ) ) ) )
_UpperCamelCase = {}
@property
def UpperCamelCase_ ( self : Dict ):
return len(self.encoder )
def UpperCamelCase_ ( self : Optional[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
if token in self.cache:
return self.cache[token]
_UpperCamelCase = tuple(_A )
_UpperCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_UpperCamelCase = get_pairs(_A )
if not pairs:
return token
while True:
_UpperCamelCase = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCamelCase , _UpperCamelCase = bigram
_UpperCamelCase = []
_UpperCamelCase = 0
while i < len(_A ):
try:
_UpperCamelCase = word.index(_A , _A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCamelCase = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCamelCase = tuple(_A )
_UpperCamelCase = new_word
if len(_A ) == 1:
break
else:
_UpperCamelCase = get_pairs(_A )
_UpperCamelCase = '''@@ '''.join(_A )
_UpperCamelCase = word[:-4]
_UpperCamelCase = word
return word
def UpperCamelCase_ ( self : Tuple , _A : int ):
_UpperCamelCase = []
_UpperCamelCase = re.findall(R'''\S+\n?''' , _A )
for token in words:
split_tokens.extend(list(self.bpe(_A ).split(''' ''' ) ) )
return split_tokens
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] ):
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def UpperCamelCase_ ( self : int , _A : int ):
return self.decoder.get(_A , self.unk_token )
def UpperCamelCase_ ( self : str , _A : Optional[int] ):
_UpperCamelCase = ''' '''.join(_A ).replace('''@@ ''' , '''''' ).strip()
return out_string
def UpperCamelCase_ ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ):
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + '''\n''' )
_UpperCamelCase = 0
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_UpperCamelCase = token_index
writer.write(''' '''.join(_A ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 10 | from __future__ import annotations
_lowerCAmelCase = [True] * 1_000_001
_lowerCAmelCase = 2
while i * i <= 1_000_000:
if seive[i]:
for j in range(i * i, 1_000_001, i):
_lowerCAmelCase = False
i += 1
def _snake_case ( __snake_case ):
return seive[n]
def _snake_case ( __snake_case ):
return any(digit in '''02468''' for digit in str(__snake_case ) )
def _snake_case ( __snake_case = 1000000 ):
_UpperCamelCase = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(__snake_case ) and not contains_an_even_digit(__snake_case ):
_UpperCamelCase = str(__snake_case )
_UpperCamelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(__snake_case ) )]
if all(is_prime(__snake_case ) for i in list_nums ):
result.append(__snake_case )
return result
def _snake_case ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
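    # A circular prime stays prime under every rotation of its digits (e.g. 197 -> 971 -> 719);
    # Project Euler problem 35 expects 55 such primes below one million.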
| 10 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = DebertaVaTokenizer(_A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 10 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
_lowerCAmelCase = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__lowercase )}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The input training data file (a text file)."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether ot not to use whole word mask."} )
UpperCAmelCase = field(
default=0.1_5, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
UpperCAmelCase = field(
default=1 / 6, metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
}, )
UpperCAmelCase = field(
default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
UpperCAmelCase = field(
default=-1, metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
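
# Illustrative command line (the flags are the real arguments defined above;
# the file and directory names are placeholders):
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file ./train.txt \
#       --do_train \
#       --output_dir ./lm-output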
| 10 | import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                # Swap the child with its parent, in both the heap and the
                # position bookkeeping.
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
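
# Quick sanity check (added for illustration, not part of the original script):
# a triangle graph 0-1 (weight 1), 1-2 (weight 2), 0-2 (weight 3) should keep
# the two cheapest edges as the spanning tree.
#
#   >>> graph = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#   >>> prisms_algorithm(graph)
#   [(0, 1), (1, 2)]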
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 10 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self : int , _A : int = 32 , _A : int = 64 , _A : int = 20 , _A : int = 768 , _A : Union[str, Any]=77 , _A : Optional[Any]=4 , _A : float = 0.0 , _A : str = "silu" , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[str] = "linear" , _A : Optional[str] = "prd" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[int] = None , ):
super().__init__()
_UpperCamelCase = num_attention_heads
_UpperCamelCase = attention_head_dim
_UpperCamelCase = num_attention_heads * attention_head_dim
_UpperCamelCase = additional_embeddings
_UpperCamelCase = time_embed_dim or inner_dim
_UpperCamelCase = embedding_proj_dim or embedding_dim
_UpperCamelCase = clip_embed_dim or embedding_dim
_UpperCamelCase = Timesteps(_A , _A , 0 )
_UpperCamelCase = TimestepEmbedding(_A , _A , out_dim=_A , act_fn=_A )
_UpperCamelCase = nn.Linear(_A , _A )
if embedding_proj_norm_type is None:
_UpperCamelCase = None
elif embedding_proj_norm_type == "layer":
_UpperCamelCase = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_UpperCamelCase = nn.Linear(_A , _A )
if encoder_hid_proj_type is None:
_UpperCamelCase = None
elif encoder_hid_proj_type == "linear":
_UpperCamelCase = nn.Linear(_A , _A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_UpperCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _A ) )
if added_emb_type == "prd":
_UpperCamelCase = nn.Parameter(torch.zeros(1 , 1 , _A ) )
elif added_emb_type is None:
_UpperCamelCase = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_UpperCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
_A , _A , _A , dropout=_A , activation_fn='''gelu''' , attention_bias=_A , )
for d in range(_A )
] )
if norm_in_type == "layer":
_UpperCamelCase = nn.LayerNorm(_A )
elif norm_in_type is None:
_UpperCamelCase = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_UpperCamelCase = nn.LayerNorm(_A )
_UpperCamelCase = nn.Linear(_A , _A )
_UpperCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
_UpperCamelCase = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , _A , persistent=_A )
_UpperCamelCase = nn.Parameter(torch.zeros(1 , _A ) )
_UpperCamelCase = nn.Parameter(torch.zeros(1 , _A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = {}
def fn_recursive_add_processors(_A : str , _A : torch.nn.Module , _A : Dict[str, AttentionProcessor] ):
if hasattr(_A , '''set_processor''' ):
_UpperCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , _A , _A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A , _A , _A )
return processors
def UpperCamelCase_ ( self : Optional[Any] , _A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
_UpperCamelCase = len(self.attn_processors.keys() )
if isinstance(_A , _A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A : str , _A : torch.nn.Module , _A : List[str] ):
if hasattr(_A , '''set_processor''' ):
if not isinstance(_A , _A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , _A , _A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A , _A , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : Union[torch.Tensor, float, int] , _A : torch.FloatTensor , _A : Optional[torch.FloatTensor] = None , _A : Optional[torch.BoolTensor] = None , _A : bool = True , ):
_UpperCamelCase = hidden_states.shape[0]
_UpperCamelCase = timestep
if not torch.is_tensor(_A ):
_UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_UpperCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCamelCase = timesteps * torch.ones(_A , dtype=timesteps.dtype , device=timesteps.device )
_UpperCamelCase = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_UpperCamelCase = timesteps_projected.to(dtype=self.dtype )
_UpperCamelCase = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_UpperCamelCase = self.embedding_proj_norm(_A )
_UpperCamelCase = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_UpperCamelCase = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
_UpperCamelCase = self.proj_in(_A )
_UpperCamelCase = self.positional_embedding.to(hidden_states.dtype )
_UpperCamelCase = []
_UpperCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_UpperCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_UpperCamelCase = hidden_states[:, None, :]
_UpperCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_UpperCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(_A , -1 , -1 )
additional_embeds.append(_A )
_UpperCamelCase = torch.cat(
_A , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_UpperCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_UpperCamelCase = F.pad(
_A , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_UpperCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
_UpperCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
_UpperCamelCase = F.pad(_A , (0, self.additional_embeddings) , value=0.0 )
_UpperCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_UpperCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_UpperCamelCase = self.norm_in(_A )
for block in self.transformer_blocks:
_UpperCamelCase = block(_A , attention_mask=_A )
_UpperCamelCase = self.norm_out(_A )
if self.prd_embedding is not None:
_UpperCamelCase = hidden_states[:, -1]
else:
_UpperCamelCase = hidden_states[:, additional_embeddings_len:]
_UpperCamelCase = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
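
# Shape sketch for orientation (assuming this block mirrors diffusers'
# PriorTransformer, which the output class above suggests): the forward pass
# takes a noisy CLIP image embedding `hidden_states` of shape
# (batch, embedding_dim), a scalar or (batch,) `timestep`, and a pooled text
# embedding of shape (batch, embedding_dim), and returns
# `predicted_image_embedding` with the same (batch, embedding_dim) shape.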
| 10 | import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
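
# Typical usage, mirroring accelerate's documented behavior: messages are
# logged only on the main process unless told otherwise.
#
#   logger = get_logger(__name__)
#   logger.info("printed once, on the main process")
#   logger.info("printed on every process", main_process_only=False)
#   logger.info("printed rank by rank", main_process_only=False, in_order=True)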
| 10 | 1 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
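
# Usage sketch: these arguments drop into Seq2SeqTrainer exactly like plain
# TrainingArguments; the generation_* fields only take effect when
# predict_with_generate=True.
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="./out", predict_with_generate=True, generation_num_beams=4
#   )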
| 10 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = "▁"
_lowerCAmelCase = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def UpperCamelCase_ ( self : List[str] ):
super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict ):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def UpperCamelCase_ ( self : Any ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
def UpperCamelCase_ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def UpperCamelCase_ ( self : int ):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def UpperCamelCase_ ( self : Union[str, Any] ):
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def UpperCamelCase_ ( self : int ):
        symbols = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def UpperCamelCase_ ( self : Dict ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
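
# For reference, the round trip exercised by the slow tests above boils down to
# this (the model id is the real checkpoint used in this file):
#
#   tok = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder")
#   tok.encode("Hello World!")  # -> [18536, 2260, 101]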
| 10 | 1 |
def knapsack(values, weights, number_of_items, max_weight, index):
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item entirely.
    ans1 = knapsack(values, weights, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
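
    # Worked example (added for illustration): with capacity 50, taking the
    # 20kg/100 and 30kg/120 items is optimal.
    values = [60, 100, 120]
    weights = [10, 20, 30]
    print(knapsack(values, weights, len(values), 50, 0))  # expected: 220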
| 10 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
def UpperCamelCase_ ( self : Optional[int] ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
_UpperCamelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
_UpperCamelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
_UpperCamelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
_UpperCamelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
_UpperCamelCase = AutoencoderKL()
_UpperCamelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def UpperCamelCase_ ( self : Dict , _A : Tuple , _A : Dict=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
_UpperCamelCase = pipe('''anime turle''' , generator=_A , output_type='''np''' )
_UpperCamelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 10 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
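
# Lazy-import usage note: with the structure above, the torch-backed module is
# only imported on first attribute access, e.g.:
#
#   from transformers import FocalNetConfig, FocalNetModel
#   model = FocalNetModel(FocalNetConfig())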
| 10 | from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
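
    # Small demonstration (added for illustration): the nearest neighbour of
    # [0., 0.] among the dataset rows is [0., 1.] at Euclidean distance 1.
    dataset = np.array([[0.0, 1.0], [3.0, 4.0]])
    value_array = np.array([[0.0, 0.0]])
    print(similarity_search(dataset, value_array))  # [[[0.0, 1.0], 1.0]]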
| 10 | 1 |
def find_min(arr):
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i items sums to j.
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
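
# Worked example (added for illustration): [1, 6, 11, 5] splits into
# {1, 5, 6} and {11}, so the minimum subset-sum difference is 23 - 2 * 11 = 1.
#
#   >>> find_min([1, 6, 11, 5])
#   1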
| 10 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
return 32
@property
def UpperCamelCase_ ( self : int ):
return 32
@property
def UpperCamelCase_ ( self : List[str] ):
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return 8
@property
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
_UpperCamelCase = PriorTransformer(**_A )
return model
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_UpperCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
_UpperCamelCase = ShapERenderer(**_A )
return model
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = self.dummy_prior
_UpperCamelCase = self.dummy_text_encoder
_UpperCamelCase = self.dummy_tokenizer
_UpperCamelCase = self.dummy_renderer
_UpperCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , )
_UpperCamelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase_ ( self : Tuple , _A : Tuple , _A : Optional[int]=0 ):
if str(_A ).startswith('''mps''' ):
_UpperCamelCase = torch.manual_seed(_A )
else:
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(_A )
_UpperCamelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = '''cpu'''
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = pipe(**self.get_dummy_inputs(_A ) )
_UpperCamelCase = output.images[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = torch_device == '''cpu'''
_UpperCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_A , relax_max_difference=_A , )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = self.pipeline_class(**_A )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = 1
_UpperCamelCase = 2
_UpperCamelCase = self.get_dummy_inputs(_A )
for key in inputs.keys():
if key in self.batch_params:
_UpperCamelCase = batch_size * [inputs[key]]
_UpperCamelCase = pipe(**_A , num_images_per_prompt=_A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
_UpperCamelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
_UpperCamelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.Generator(device=_A ).manual_seed(0 )
_UpperCamelCase = pipe(
'''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_A , _A )
| 10 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def UpperCamelCase_ ( self : int ):
torch.manual_seed(0 )
_UpperCamelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def UpperCamelCase_ ( self : Tuple ):
torch.manual_seed(0 )
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_A )
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.dummy_uncond_unet
_UpperCamelCase = DDIMScheduler()
_UpperCamelCase = self.dummy_vq_model
_UpperCamelCase = LDMPipeline(unet=_A , vqvae=_A , scheduler=_A )
ldm.to(_A )
ldm.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = ldm(generator=_A , num_inference_steps=2 , output_type='''numpy''' ).images
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = ldm(generator=_A , num_inference_steps=2 , output_type='''numpy''' , return_dict=_A )[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
_UpperCamelCase = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(_A )
ldm.set_progress_bar_config(disable=_A )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = ldm(generator=_A , num_inference_steps=5 , output_type='''numpy''' ).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
_UpperCamelCase = 1e-2 if torch_device != '''mps''' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 10 | import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_lowerCAmelCase = HfApi()
_lowerCAmelCase = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 10 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
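
# Illustrative invocation (file names are placeholders; the flags are the real
# ones defined above):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan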
| 10 | from typing import List
from .keymap import KEYMAP, get_character
def _snake_case ( __snake_case ):
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += [key]
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
def _snake_case ( *__snake_case ):
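# Decorator that registers several keys the decorated handler responds to.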
def decorator(__snake_case ):
_UpperCamelCase = getattr(__snake_case , '''handle_key''' , [] )
handle += keys
setattr(__snake_case , '''handle_key''' , __snake_case )
return func
return decorator
class lowerCAmelCase_ ( __lowercase ):
def __new__( cls : Optional[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] ):
_UpperCamelCase = super().__new__(cls , _A , _A , _A )
if not hasattr(_A , '''key_handler''' ):
setattr(_A , '''key_handler''' , {} )
setattr(_A , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCamelCase = getattr(_A , '''handle_key''' , [] )
for key in handled_keys:
_UpperCamelCase = value
return new_cls
@staticmethod
def UpperCamelCase_ ( cls : str ):
_UpperCamelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCamelCase = ord(_A )
_UpperCamelCase = cls.key_handler.get(_A )
if handler:
_UpperCamelCase = char
return handler(cls )
else:
return None
def _snake_case ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 10 | 1 |
from collections.abc import Callable
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : Callable | None = None ):
# Stores actual heap items.
_UpperCamelCase = []
# Stores indexes of each item for supporting updates and deletion.
_UpperCamelCase = {}
# Stores current size of heap.
_UpperCamelCase = 0
# Stores the function used to evaluate the score of an item, on which
# the heap ordering will be based.
_UpperCamelCase = key or (lambda _A : _A)
def UpperCamelCase_ ( self : List[Any] , _A : int ):
return int((i - 1) / 2 ) if i > 0 else None
def UpperCamelCase_ ( self : str , _A : int ):
_UpperCamelCase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def UpperCamelCase_ ( self : Dict , _A : int ):
_UpperCamelCase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def UpperCamelCase_ ( self : List[Any] , _A : int , _A : int ):
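# First update the indexes of the two items in the position map.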
_UpperCamelCase , _UpperCamelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_UpperCamelCase , _UpperCamelCase = self.arr[j], self.arr[i]
def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int ):
return self.arr[i][1] < self.arr[j][1]
def UpperCamelCase_ ( self : List[str] , _A : int ):
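# Return the index, among i and its children, that should hold the parent position under the heap order.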
_UpperCamelCase = self._left(_A )
_UpperCamelCase = self._right(_A )
_UpperCamelCase = i
if left is not None and not self._cmp(_A , _A ):
_UpperCamelCase = left
if right is not None and not self._cmp(_A , _A ):
_UpperCamelCase = right
return valid_parent
def UpperCamelCase_ ( self : Optional[int] , _A : int ):
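# Bubble the item at the given index up toward the root while it compares better than its parent.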
_UpperCamelCase = self._parent(_A )
while parent is not None and not self._cmp(_A , _A ):
self._swap(_A , _A )
_UpperCamelCase , _UpperCamelCase = parent, self._parent(_A )
def UpperCamelCase_ ( self : Union[str, Any] , _A : int ):
_UpperCamelCase = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A , _A )
_UpperCamelCase , _UpperCamelCase = valid_parent, self._get_valid_parent(_A )
def UpperCamelCase_ ( self : Tuple , _A : int , _A : int ):
if item not in self.pos_map:
return
_UpperCamelCase = self.pos_map[item]
_UpperCamelCase = [item, self.key(_A )]
# Make sure the heap is valid in both the up and down directions.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def UpperCamelCase_ ( self : Dict , _A : int ):
if item not in self.pos_map:
return
_UpperCamelCase = self.pos_map[item]
del self.pos_map[item]
_UpperCamelCase = self.arr[self.size - 1]
_UpperCamelCase = index
self.size -= 1
# Make sure the heap is valid in both the up and down directions. Ideally only one
# of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def UpperCamelCase_ ( self : Union[str, Any] , _A : int , _A : int ):
_UpperCamelCase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_UpperCamelCase = [item, self.key(_A )]
_UpperCamelCase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def UpperCamelCase_ ( self : List[Any] ):
return self.arr[0] if self.size else None
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _snake_case ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCAmelCase_ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : str ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A , num_return_sequences=2 , return_tensors=_A )
self.assertEqual(
_A , [
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
] , )
_UpperCamelCase = text_generator.model.config.eos_token_id
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
[
{'''generated_token_ids''': ANY(_A )},
{'''generated_token_ids''': ANY(_A )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_UpperCamelCase = text_generator('''This is a test''' , do_sample=_A )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_UpperCamelCase = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_A )
self.assertEqual(
_A , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def UpperCamelCase_ ( self : int , _A : str , _A : Union[str, Any] , _A : Any ):
_UpperCamelCase = TextGenerationPipeline(model=_A , tokenizer=_A )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = '''Hello I believe in'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_UpperCamelCase = text_generator(_A )
self.assertEqual(
_A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_UpperCamelCase = text_generator(_A , stop_sequence=''' fe''' )
self.assertEqual(_A , [{'''generated_text''': '''Hello I believe in fe'''}] )
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : Union[str, Any] ):
_UpperCamelCase = text_generator.model
_UpperCamelCase = text_generator.tokenizer
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = pipeline(task='''text-generation''' , model=_A , tokenizer=_A , return_full_text=_A )
_UpperCamelCase = text_generator('''This is a test''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_UpperCamelCase = text_generator('''This is a test''' , return_full_text=_A )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_UpperCamelCase = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_UpperCamelCase = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_A )
self.assertEqual(
_A , [
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
[{'''generated_text''': ANY(_A )}, {'''generated_text''': ANY(_A )}],
] , )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_text=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_full_text=_A , return_tensors=_A )
with self.assertRaises(_A ):
_UpperCamelCase = text_generator('''test''' , return_text=_A , return_tensors=_A )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_UpperCamelCase = text_generator('''''' )
self.assertEqual(_A , [{'''generated_text''': ANY(_A )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_UpperCamelCase = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_UpperCamelCase = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_UpperCamelCase = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_A ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
# Classic `model_kwargs`
_UpperCamelCase = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_UpperCamelCase = pipe('''This is a test''' )
self.assertEqual(
_A , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : Optional[int] ):
import torch
_UpperCamelCase = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_A , top_p=0.5 )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = '''Hello world'''
_UpperCamelCase = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_UpperCamelCase = logging.get_logger('''transformers.generation.tf_utils''' )
else:
_UpperCamelCase = logging.get_logger('''transformers.generation.utils''' )
_UpperCamelCase = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 , max_new_tokens=1 )
self.assertIn(_A , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_new_tokens=1 )
self.assertNotIn(_A , cl.out )
with CaptureLogger(_A ) as cl:
_UpperCamelCase = text_generator(_A , max_length=10 )
self.assertNotIn(_A , cl.out )
| 10 | 1 |
def _snake_case ( ):
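# Project Euler 48: return the last ten digits of 1^1 + 2^2 + ... + 1000^1000.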
_UpperCamelCase = 0
for i in range(1 , 1001 ):
total += i**i
return str(__snake_case )[-10:]
if __name__ == "__main__":
print(solution())
| 10 | def _snake_case ( __snake_case = 100 ):
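# Project Euler 6: difference between the square of the sum and the sum of the squares of the first n natural numbers, computed with closed-form formulas.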
_UpperCamelCase = (n * (n + 1) // 2) ** 2
_UpperCamelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
def _snake_case ( __snake_case ):
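# Recursively build the binary digit string of a non-negative integer.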
_UpperCamelCase = int(__snake_case )
if decimal in (0, 1): # Exit cases for the recursion
return str(__snake_case )
_UpperCamelCase , _UpperCamelCase = divmod(__snake_case , 2 )
return binary_recursive(__snake_case ) + str(__snake_case )
def _snake_case ( __snake_case ):
_UpperCamelCase = str(__snake_case ).strip()
if not number:
raise ValueError('''No input value was provided''' )
_UpperCamelCase = '''-''' if number.startswith('''-''' ) else ''''''
_UpperCamelCase = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f"""{negative}0b{binary_recursive(int(__snake_case ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
| 10 | import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
def constraint_to_multiple_of(__snake_case , __snake_case , __snake_case=0 , __snake_case=None ):
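# Round val to the nearest multiple of `multiple`, flooring if that exceeds max_val and ceiling if it falls below min_val.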
_UpperCamelCase = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_UpperCamelCase = math.floor(val / multiple ) * multiple
if x < min_val:
_UpperCamelCase = math.ceil(val / multiple ) * multiple
return x
_UpperCamelCase = (output_size, output_size) if isinstance(__snake_case , __snake_case ) else output_size
_UpperCamelCase , _UpperCamelCase = get_image_size(__snake_case )
_UpperCamelCase , _UpperCamelCase = output_size
# determine new height and width
_UpperCamelCase = output_height / input_height
_UpperCamelCase = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_UpperCamelCase = scale_width
else:
# fit height
_UpperCamelCase = scale_height
_UpperCamelCase = constraint_to_multiple_of(scale_height * input_height , multiple=__snake_case )
_UpperCamelCase = constraint_to_multiple_of(scale_width * input_width , multiple=__snake_case )
return (new_height, new_width)
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = False , _A : int = 1 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : List[str] , ):
super().__init__(**_A )
_UpperCamelCase = size if size is not None else {'''height''': 384, '''width''': 384}
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of
_UpperCamelCase = resample
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : bool = False , _A : int = 1 , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
_UpperCamelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_UpperCamelCase = get_resize_output_image_size(
_A , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : int , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCamelCase_ ( self : Optional[int] , _A : ImageInput , _A : bool = None , _A : int = None , _A : bool = None , _A : int = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(_A )
_UpperCamelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_UpperCamelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(_A ) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
_UpperCamelCase = [to_channel_dimension_format(_A , _A ) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
def UpperCamelCase_ ( self : Any , _A : Any , _A : List[Tuple] = None ):
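# Convert the model's logits into one semantic segmentation map per image, optionally interpolating to the requested target sizes.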
_UpperCamelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_A ):
_UpperCamelCase = target_sizes.numpy()
_UpperCamelCase = []
for idx in range(len(_A ) ):
_UpperCamelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
_UpperCamelCase = logits.argmax(dim=1 )
_UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 10 | 1 |
from __future__ import annotations
_lowerCAmelCase = []
def _snake_case ( __snake_case , __snake_case , __snake_case ):
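# A queen may be placed at (row, column) only if no queen already occupies the same row, column, or either upper diagonal.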
for i in range(len(__snake_case ) ):
if board[row][i] == 1:
return False
for i in range(len(__snake_case ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(__snake_case , -1 , -1 ) , range(__snake_case , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(__snake_case , -1 , -1 ) , range(__snake_case , len(__snake_case ) ) ):
if board[i][j] == 1:
return False
return True
def _snake_case ( __snake_case , __snake_case ):
if row >= len(__snake_case ):
solution.append(__snake_case )
printboard(__snake_case )
print()
return True
for i in range(len(__snake_case ) ):
if is_safe(__snake_case , __snake_case , __snake_case ):
_UpperCamelCase = 1
solve(__snake_case , row + 1 )
_UpperCamelCase = 0
return False
def _snake_case ( __snake_case ):
for i in range(len(__snake_case ) ):
for j in range(len(__snake_case ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
_lowerCAmelCase = 8
_lowerCAmelCase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 10 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : str , _A : List[str] , _A : Optional[Any] , _A : List[str] , _A : Optional[int]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 10 | 1 |
from __future__ import annotations
def _snake_case ( __snake_case , __snake_case = None ):
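# Return every way the target string can be built by concatenating words from the word bank, via dynamic programming over prefixes.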
_UpperCamelCase = word_bank or []
# create a table
_UpperCamelCase = len(__snake_case ) + 1
_UpperCamelCase = []
for _ in range(__snake_case ):
table.append([] )
# seed value
_UpperCamelCase = [[]] # the empty string can be built in exactly one way: with no words
# iterate through the indices
for i in range(__snake_case ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(__snake_case )] == word:
_UpperCamelCase = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push that combination to table[i + len(word)]
table[i + len(__snake_case )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(__snake_case )]:
combination.reverse()
return table[len(__snake_case )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
| 10 | import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
_lowerCAmelCase = True
from torch.cuda.amp import autocast
_lowerCAmelCase = logging.getLogger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
UpperCAmelCase = field(
default=0.1, metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
}, )
UpperCAmelCase = field(
default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, )
UpperCAmelCase = field(
default=0.0_5, metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
}, )
UpperCAmelCase = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default="train+validation", metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "The number of processes to use for the preprocessing."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
}, )
UpperCAmelCase = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Union[str, Any] , _A : List[Dict[str, Union[List[int], torch.Tensor]]] ):
# split inputs and labels since they have to be of different lengths and need
# different padding methods
_UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
_UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
_UpperCamelCase = self.processor.pad(
_A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
_UpperCamelCase = self.processor.pad(
labels=_A , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
_UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
_UpperCamelCase = labels
return batch
class lowerCAmelCase_ ( __lowercase ):
def UpperCamelCase_ ( self : Dict , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]] ):
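# Custom training step: runs the forward pass under AMP autocast when enabled, reduces the multi-GPU CTC loss per the model config, and dispatches the backward pass to the AMP scaler, Apex, or DeepSpeed as appropriate.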
model.train()
_UpperCamelCase = self._prepare_inputs(_A )
if self.use_amp:
with autocast():
_UpperCamelCase = self.compute_loss(_A , _A )
else:
_UpperCamelCase = self.compute_loss(_A , _A )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
_UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
_UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
_UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_A ).backward()
elif self.use_apex:
with amp.scale_loss(_A , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_A )
else:
loss.backward()
return loss.detach()
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
_UpperCamelCase = f"""[{"".join(data_args.chars_to_ignore )}]"""
def remove_special_characters(__snake_case ):
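# Strip the ignored characters from the transcript and lowercase it.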
_UpperCamelCase = re.sub(__snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case , remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=train_dataset.column_names , )
_UpperCamelCase = train_dataset.map(
__snake_case , batched=__snake_case , batch_size=-1 , keep_in_memory=__snake_case , remove_columns=eval_dataset.column_names , )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(__snake_case , __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=__snake_case , return_attention_mask=__snake_case )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
_UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ) , data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
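# Load the audio file and resample it from 48 kHz to the model's 16 kHz.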
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 16000
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
_UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
_UpperCamelCase = eval_dataset.map(
__snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=__snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
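# Greedy-decode the argmax predictions and score them against the references with word error rate.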
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case , axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case , references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case , padding=__snake_case )
# Initialize our Trainer
_UpperCamelCase = CTCTrainer(
model=__snake_case , data_collator=__snake_case , args=__snake_case , compute_metrics=__snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''train''' , __snake_case )
trainer.save_metrics('''train''' , __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case , len(__snake_case ) )
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
return results
if __name__ == "__main__":
main()
| 10 | 1 |
def _snake_case ( __snake_case , __snake_case ):
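# Net present value: discount each cash flow by (1 + discount_rate) ** period and sum, rounded to two decimal places.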
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
_UpperCamelCase = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(__snake_case ) )
return round(__snake_case , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | import math
class lowerCAmelCase_ :
def __init__( self : Tuple , _A : int=0 ): # a graph with Node 0,1,...,N-1
_UpperCamelCase = n
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # adjacency matrix for weight
_UpperCamelCase = [
[math.inf for j in range(0 , _A )] for i in range(0 , _A )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase_ ( self : Dict , _A : str , _A : List[str] , _A : Optional[Any] ):
_UpperCamelCase = w
def UpperCamelCase_ ( self : Optional[int] ):
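# Floyd-Warshall: for each intermediate vertex k, relax the shortest distance between every pair (i, j).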
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] , _A : Optional[int] ):
return self.dp[u][v]
if __name__ == "__main__":
_lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 10 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
# TODO: upload to AWS
_lowerCAmelCase = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = "retribert"
def __init__( self : int , _A : int=3_0522 , _A : Dict=768 , _A : Tuple=8 , _A : int=12 , _A : Optional[Any]=3072 , _A : int="gelu" , _A : str=0.1 , _A : Union[str, Any]=0.1 , _A : int=512 , _A : str=2 , _A : str=0.02 , _A : Optional[int]=1e-12 , _A : List[Any]=True , _A : Optional[int]=128 , _A : List[str]=0 , **_A : str , ):
super().__init__(pad_token_id=_A , **_A )
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_act
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = initializer_range
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = share_encoders
_UpperCamelCase = projection_dim
| 10 | import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case=None , __snake_case=None ):
return field(default_factory=lambda: default , metadata=__snake_case )
@dataclass
class lowerCAmelCase_ :
UpperCAmelCase = list_field(
default=[], metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
}, )
UpperCAmelCase = list_field(
default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
UpperCAmelCase = list_field(
default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Use FP16 to accelerate inference."} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Benchmark training of model"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Verbose memory tracing"} )
UpperCAmelCase = field(
default=__lowercase, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
}, )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Trace memory line by line"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save result to a CSV file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Save all print statements in a log file"} )
UpperCAmelCase = field(default=__lowercase, metadata={"help": "Whether to print environment information"} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
}, )
UpperCAmelCase = field(
default=F"""inference_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv."}, )
UpperCAmelCase = field(
default=F"""inference_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv."}, )
UpperCAmelCase = field(
default=F"""train_time_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
UpperCAmelCase = field(
default=F"""train_memory_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
UpperCAmelCase = field(
default=F"""env_info_{round(time() )}.csv""", metadata={"help": "CSV filename used if saving environment information."}, )
UpperCAmelCase = field(
default=F"""log_{round(time() )}.csv""", metadata={"help": "Log filename used if print statements are saved in log."}, )
UpperCAmelCase = field(default=3, metadata={"help": "Times an experiment will be run."} )
UpperCAmelCase = field(
default=__lowercase, metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
}, )
def UpperCamelCase_ ( self : Union[str, Any] ):
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , _A , )
def UpperCamelCase_ ( self : str ):
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def UpperCamelCase_ ( self : List[Any] ):
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def UpperCamelCase_ ( self : Optional[int] ):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 10 | 1 |
from __future__ import annotations
def _snake_case ( __snake_case ):
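# Map each character of the lowercase plaintext to its 1-based alphabet position (a=1, ..., z=26).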
return [ord(elem ) - 96 for elem in plain]
def _snake_case ( __snake_case ):
return "".join(chr(elem + 96 ) for elem in encoded )
def _snake_case ( ):
_UpperCamelCase = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , __snake_case )
print('''Decoded:''' , decode(__snake_case ) )
if __name__ == "__main__":
main()
| 10 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case ( *__snake_case , __snake_case = None , __snake_case=True , __snake_case=2 ):
from .. import __version__
_UpperCamelCase = take_from
_UpperCamelCase = ()
if not isinstance(args[0] , __snake_case ):
_UpperCamelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__snake_case ).base_version ) >= version.parse(__snake_case ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
_UpperCamelCase = None
if isinstance(__snake_case , __snake_case ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__snake_case ),)
_UpperCamelCase = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__snake_case , __snake_case ):
values += (getattr(__snake_case , __snake_case ),)
_UpperCamelCase = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
_UpperCamelCase = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
_UpperCamelCase = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __snake_case , stacklevel=__snake_case )
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) > 0:
_UpperCamelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCamelCase = call_frame.filename
_UpperCamelCase = call_frame.lineno
_UpperCamelCase = call_frame.function
_UpperCamelCase , _UpperCamelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__snake_case ) == 0:
return
elif len(__snake_case ) == 1:
return values[0]
return values
| 10 | 1 |